2024-12-16 17:56:24,232 main DEBUG Apache Log4j Core 2.17.2 initializing configuration org.apache.logging.log4j.core.config.properties.PropertiesConfiguration@7fb4f2a9 2024-12-16 17:56:24,245 main DEBUG Took 0.010249 seconds to load 1 plugins from package org.apache.hadoop.hbase.logging 2024-12-16 17:56:24,245 main DEBUG PluginManager 'Core' found 129 plugins 2024-12-16 17:56:24,245 main DEBUG PluginManager 'Level' found 0 plugins 2024-12-16 17:56:24,246 main DEBUG PluginManager 'Lookup' found 16 plugins 2024-12-16 17:56:24,247 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-12-16 17:56:24,253 main DEBUG PluginManager 'TypeConverter' found 26 plugins 2024-12-16 17:56:24,264 main DEBUG LoggerConfig$Builder(additivity="null", level="ERROR", levelAndRefs="null", name="org.apache.hadoop.metrics2.util.MBeans", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-12-16 17:56:24,266 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-12-16 17:56:24,266 main DEBUG LoggerConfig$Builder(additivity="null", level="DEBUG", levelAndRefs="null", name="org.apache.hadoop.hbase.logging.TestJul2Slf4j", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-12-16 17:56:24,267 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-12-16 17:56:24,267 main DEBUG LoggerConfig$Builder(additivity="null", level="ERROR", levelAndRefs="null", name="org.apache.zookeeper", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-12-16 17:56:24,267 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-12-16 17:56:24,268 main DEBUG LoggerConfig$Builder(additivity="null", level="WARN", levelAndRefs="null", name="org.apache.hadoop.metrics2.impl.MetricsSinkAdapter", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-12-16 17:56:24,268 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-12-16 17:56:24,269 main DEBUG LoggerConfig$Builder(additivity="null", level="ERROR", levelAndRefs="null", name="org.apache.hadoop.metrics2.impl.MetricsSystemImpl", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-12-16 17:56:24,269 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-12-16 17:56:24,270 main DEBUG LoggerConfig$Builder(additivity="false", level="WARN", levelAndRefs="null", name="org.apache.directory", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-12-16 17:56:24,270 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-12-16 17:56:24,270 main DEBUG LoggerConfig$Builder(additivity="null", level="DEBUG", levelAndRefs="null", name="org.apache.hadoop.hbase.ipc.FailedServers", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-12-16 17:56:24,271 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 
2024-12-16 17:56:24,271 main DEBUG LoggerConfig$Builder(additivity="null", level="WARN", levelAndRefs="null", name="org.apache.hadoop.metrics2.impl.MetricsConfig", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-12-16 17:56:24,271 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-12-16 17:56:24,271 main DEBUG LoggerConfig$Builder(additivity="null", level="INFO", levelAndRefs="null", name="org.apache.hadoop.hbase.ScheduledChore", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-12-16 17:56:24,272 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-12-16 17:56:24,272 main DEBUG LoggerConfig$Builder(additivity="null", level="DEBUG", levelAndRefs="null", name="org.apache.hadoop.hbase.regionserver.RSRpcServices", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-12-16 17:56:24,272 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-12-16 17:56:24,273 main DEBUG LoggerConfig$Builder(additivity="null", level="WARN", levelAndRefs="null", name="org.apache.hadoop", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-12-16 17:56:24,273 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-12-16 17:56:24,273 main DEBUG LoggerConfig$Builder(additivity="null", level="DEBUG", levelAndRefs="null", name="org.apache.hadoop.hbase", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-12-16 17:56:24,273 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-12-16 17:56:24,274 main DEBUG LoggerConfig$Builder(additivity="null", level="DEBUG", levelAndRefs="null", name="org.apache.hbase.thirdparty.io.netty.channel", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-12-16 17:56:24,274 main DEBUG Building Plugin[name=root, class=org.apache.logging.log4j.core.config.LoggerConfig$RootLogger]. 2024-12-16 17:56:24,275 main DEBUG LoggerConfig$RootLogger$Builder(additivity="null", level="null", levelAndRefs="INFO,Console", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-12-16 17:56:24,276 main DEBUG Building Plugin[name=loggers, class=org.apache.logging.log4j.core.config.LoggersPlugin]. 2024-12-16 17:56:24,278 main DEBUG createLoggers(={org.apache.hadoop.metrics2.util.MBeans, org.apache.hadoop.hbase.logging.TestJul2Slf4j, org.apache.zookeeper, org.apache.hadoop.metrics2.impl.MetricsSinkAdapter, org.apache.hadoop.metrics2.impl.MetricsSystemImpl, org.apache.directory, org.apache.hadoop.hbase.ipc.FailedServers, org.apache.hadoop.metrics2.impl.MetricsConfig, org.apache.hadoop.hbase.ScheduledChore, org.apache.hadoop.hbase.regionserver.RSRpcServices, org.apache.hadoop, org.apache.hadoop.hbase, org.apache.hbase.thirdparty.io.netty.channel, root}) 2024-12-16 17:56:24,278 main DEBUG Building Plugin[name=layout, class=org.apache.logging.log4j.core.layout.PatternLayout]. 
2024-12-16 17:56:24,279 main DEBUG PatternLayout$Builder(pattern="%d{ISO8601} %-5p [%t%notEmpty{ %X}] %C{2}(%L): %m%n", PatternSelector=null, Configuration(PropertiesConfig), Replace=null, charset="null", alwaysWriteExceptions="null", disableAnsi="null", noConsoleNoAnsi="null", header="null", footer="null") 2024-12-16 17:56:24,280 main DEBUG PluginManager 'Converter' found 47 plugins 2024-12-16 17:56:24,287 main DEBUG Building Plugin[name=appender, class=org.apache.hadoop.hbase.logging.HBaseTestAppender]. 2024-12-16 17:56:24,289 main DEBUG HBaseTestAppender$Builder(target="SYSTEM_ERR", maxSize="1G", bufferedIo="null", bufferSize="null", immediateFlush="null", ignoreExceptions="null", PatternLayout(%d{ISO8601} %-5p [%t%notEmpty{ %X}] %C{2}(%L): %m%n), name="Console", Configuration(PropertiesConfig), Filter=null, ={}) 2024-12-16 17:56:24,291 main DEBUG Starting HBaseTestOutputStreamManager SYSTEM_ERR 2024-12-16 17:56:24,291 main DEBUG Building Plugin[name=appenders, class=org.apache.logging.log4j.core.config.AppendersPlugin]. 2024-12-16 17:56:24,292 main DEBUG createAppenders(={Console}) 2024-12-16 17:56:24,292 main DEBUG Configuration org.apache.logging.log4j.core.config.properties.PropertiesConfiguration@7fb4f2a9 initialized 2024-12-16 17:56:24,292 main DEBUG Starting configuration org.apache.logging.log4j.core.config.properties.PropertiesConfiguration@7fb4f2a9 2024-12-16 17:56:24,293 main DEBUG Started configuration org.apache.logging.log4j.core.config.properties.PropertiesConfiguration@7fb4f2a9 OK. 2024-12-16 17:56:24,293 main DEBUG Shutting down OutputStreamManager SYSTEM_OUT.false.false-1 2024-12-16 17:56:24,293 main DEBUG OutputStream closed 2024-12-16 17:56:24,294 main DEBUG Shut down OutputStreamManager SYSTEM_OUT.false.false-1, all resources released: true 2024-12-16 17:56:24,294 main DEBUG Appender DefaultConsole-1 stopped with status true 2024-12-16 17:56:24,294 main DEBUG Stopped org.apache.logging.log4j.core.config.DefaultConfiguration@54e1c68b OK 2024-12-16 17:56:24,357 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6 2024-12-16 17:56:24,359 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=StatusLogger 2024-12-16 17:56:24,360 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=ContextSelector 2024-12-16 17:56:24,361 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name= 2024-12-16 17:56:24,362 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.directory 2024-12-16 17:56:24,362 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.metrics2.impl.MetricsSinkAdapter 2024-12-16 17:56:24,362 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.zookeeper 2024-12-16 17:56:24,362 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.hbase.logging.TestJul2Slf4j 2024-12-16 17:56:24,363 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.metrics2.impl.MetricsSystemImpl 2024-12-16 17:56:24,363 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.metrics2.util.MBeans 2024-12-16 17:56:24,363 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.hbase 2024-12-16 17:56:24,364 main DEBUG Registering 
MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop 2024-12-16 17:56:24,364 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.hbase.ipc.FailedServers 2024-12-16 17:56:24,364 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.hbase.regionserver.RSRpcServices 2024-12-16 17:56:24,364 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.metrics2.impl.MetricsConfig 2024-12-16 17:56:24,365 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hbase.thirdparty.io.netty.channel 2024-12-16 17:56:24,365 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.hbase.ScheduledChore 2024-12-16 17:56:24,366 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Appenders,name=Console 2024-12-16 17:56:24,368 main DEBUG org.apache.logging.log4j.core.util.SystemClock supports precise timestamps. 2024-12-16 17:56:24,368 main DEBUG Reconfiguration complete for context[name=1dbd16a6] at URI jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-logging/target/hbase-logging-2.7.0-SNAPSHOT-tests.jar!/log4j2.properties (org.apache.logging.log4j.core.LoggerContext@7dda48d9) with optional ClassLoader: null 2024-12-16 17:56:24,368 main DEBUG Shutdown hook enabled. Registering a new one. 2024-12-16 17:56:24,369 main DEBUG LoggerContext[name=1dbd16a6, org.apache.logging.log4j.core.LoggerContext@7dda48d9] started OK. 2024-12-16T17:56:24,609 DEBUG [main {}] hbase.HBaseTestingUtility(348): Setting hbase.rootdir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/adfbedd3-683f-aafc-4f85-97b20bf2b38d 2024-12-16 17:56:24,612 main DEBUG AsyncLogger.ThreadNameStrategy=UNCACHED (user specified null, default is UNCACHED) 2024-12-16 17:56:24,612 main DEBUG org.apache.logging.log4j.core.util.SystemClock supports precise timestamps. 
2024-12-16T17:56:24,620 INFO [main {}] hbase.HBaseClassTestRule(94): Test class org.apache.hadoop.hbase.TestAcidGuaranteesWithAdaptivePolicy timeout: 13 mins 2024-12-16T17:56:24,643 INFO [Time-limited test {}] hbase.HBaseTestingUtility(1126): Starting up minicluster with option: StartMiniClusterOption{numMasters=1, masterClass=null, numRegionServers=1, rsPorts=, rsClass=null, numDataNodes=1, dataNodeHosts=null, numZkServers=1, createRootDir=false, createWALDir=false} 2024-12-16T17:56:24,646 INFO [Time-limited test {}] hbase.HBaseZKTestingUtility(82): Created new mini-cluster data directory: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/adfbedd3-683f-aafc-4f85-97b20bf2b38d/cluster_489038c2-cc48-2e45-cfe4-52e70c8038f4, deleteOnExit=true 2024-12-16T17:56:24,646 INFO [Time-limited test {}] hbase.HBaseTestingUtility(1140): STARTING DFS 2024-12-16T17:56:24,647 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting test.cache.data to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/adfbedd3-683f-aafc-4f85-97b20bf2b38d/test.cache.data in system properties and HBase conf 2024-12-16T17:56:24,647 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting hadoop.tmp.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/adfbedd3-683f-aafc-4f85-97b20bf2b38d/hadoop.tmp.dir in system properties and HBase conf 2024-12-16T17:56:24,648 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting hadoop.log.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/adfbedd3-683f-aafc-4f85-97b20bf2b38d/hadoop.log.dir in system properties and HBase conf 2024-12-16T17:56:24,649 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting mapreduce.cluster.local.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/adfbedd3-683f-aafc-4f85-97b20bf2b38d/mapreduce.cluster.local.dir in system properties and HBase conf 2024-12-16T17:56:24,650 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting mapreduce.cluster.temp.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/adfbedd3-683f-aafc-4f85-97b20bf2b38d/mapreduce.cluster.temp.dir in system properties and HBase conf 2024-12-16T17:56:24,650 INFO [Time-limited test {}] hbase.HBaseTestingUtility(811): read short circuit is OFF 2024-12-16T17:56:24,745 WARN [Time-limited test {}] util.NativeCodeLoader(60): Unable to load native-hadoop library for your platform... using builtin-java classes where applicable 2024-12-16T17:56:24,827 DEBUG [Time-limited test {}] fs.HFileSystem(310): The file system is not a DistributedFileSystem. 
Skipping on block location reordering 2024-12-16T17:56:24,831 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting yarn.node-labels.fs-store.root-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/adfbedd3-683f-aafc-4f85-97b20bf2b38d/yarn.node-labels.fs-store.root-dir in system properties and HBase conf 2024-12-16T17:56:24,832 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting yarn.node-attribute.fs-store.root-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/adfbedd3-683f-aafc-4f85-97b20bf2b38d/yarn.node-attribute.fs-store.root-dir in system properties and HBase conf 2024-12-16T17:56:24,832 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting yarn.nodemanager.log-dirs to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/adfbedd3-683f-aafc-4f85-97b20bf2b38d/yarn.nodemanager.log-dirs in system properties and HBase conf 2024-12-16T17:56:24,833 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting yarn.nodemanager.remote-app-log-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/adfbedd3-683f-aafc-4f85-97b20bf2b38d/yarn.nodemanager.remote-app-log-dir in system properties and HBase conf 2024-12-16T17:56:24,833 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting yarn.timeline-service.entity-group-fs-store.active-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/adfbedd3-683f-aafc-4f85-97b20bf2b38d/yarn.timeline-service.entity-group-fs-store.active-dir in system properties and HBase conf 2024-12-16T17:56:24,834 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting yarn.timeline-service.entity-group-fs-store.done-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/adfbedd3-683f-aafc-4f85-97b20bf2b38d/yarn.timeline-service.entity-group-fs-store.done-dir in system properties and HBase conf 2024-12-16T17:56:24,834 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting yarn.nodemanager.remote-app-log-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/adfbedd3-683f-aafc-4f85-97b20bf2b38d/yarn.nodemanager.remote-app-log-dir in system properties and HBase conf 2024-12-16T17:56:24,835 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting dfs.journalnode.edits.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/adfbedd3-683f-aafc-4f85-97b20bf2b38d/dfs.journalnode.edits.dir in system properties and HBase conf 2024-12-16T17:56:24,835 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting dfs.datanode.shared.file.descriptor.paths to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/adfbedd3-683f-aafc-4f85-97b20bf2b38d/dfs.datanode.shared.file.descriptor.paths in system properties and HBase conf 2024-12-16T17:56:24,836 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting nfs.dump.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/adfbedd3-683f-aafc-4f85-97b20bf2b38d/nfs.dump.dir in system properties and HBase conf 2024-12-16T17:56:24,836 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting java.io.tmpdir to 
/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/adfbedd3-683f-aafc-4f85-97b20bf2b38d/java.io.tmpdir in system properties and HBase conf 2024-12-16T17:56:24,837 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting dfs.journalnode.edits.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/adfbedd3-683f-aafc-4f85-97b20bf2b38d/dfs.journalnode.edits.dir in system properties and HBase conf 2024-12-16T17:56:24,837 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting dfs.provided.aliasmap.inmemory.leveldb.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/adfbedd3-683f-aafc-4f85-97b20bf2b38d/dfs.provided.aliasmap.inmemory.leveldb.dir in system properties and HBase conf 2024-12-16T17:56:24,838 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting fs.s3a.committer.staging.tmp.path to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/adfbedd3-683f-aafc-4f85-97b20bf2b38d/fs.s3a.committer.staging.tmp.path in system properties and HBase conf 2024-12-16T17:56:25,715 WARN [Time-limited test {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-namenode.properties,hadoop-metrics2.properties 2024-12-16T17:56:25,779 INFO [Time-limited test {}] log.Log(170): Logging initialized @2185ms to org.eclipse.jetty.util.log.Slf4jLog 2024-12-16T17:56:25,839 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-12-16T17:56:25,895 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-12-16T17:56:25,915 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-12-16T17:56:25,916 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-12-16T17:56:25,917 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 660000ms 2024-12-16T17:56:25,930 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-12-16T17:56:25,933 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@2ad156f7{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/adfbedd3-683f-aafc-4f85-97b20bf2b38d/hadoop.log.dir/,AVAILABLE} 2024-12-16T17:56:25,935 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@4c273041{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-12-16T17:56:26,105 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@7096be9b{hdfs,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/adfbedd3-683f-aafc-4f85-97b20bf2b38d/java.io.tmpdir/jetty-localhost-37765-hadoop-hdfs-3_4_1-tests_jar-_-any-476469628534594735/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/hdfs} 2024-12-16T17:56:26,114 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@16dcfbe1{HTTP/1.1, (http/1.1)}{localhost:37765} 2024-12-16T17:56:26,114 INFO [Time-limited test {}] server.Server(415): Started @2521ms 2024-12-16T17:56:26,560 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-12-16T17:56:26,567 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-12-16T17:56:26,570 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-12-16T17:56:26,570 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-12-16T17:56:26,570 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 660000ms 2024-12-16T17:56:26,573 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@72940c9d{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/adfbedd3-683f-aafc-4f85-97b20bf2b38d/hadoop.log.dir/,AVAILABLE} 2024-12-16T17:56:26,573 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@6311a0d3{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-12-16T17:56:26,669 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@5e63fd41{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/adfbedd3-683f-aafc-4f85-97b20bf2b38d/java.io.tmpdir/jetty-localhost-41811-hadoop-hdfs-3_4_1-tests_jar-_-any-11924517379632593991/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-12-16T17:56:26,669 INFO 
[Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@3f715f5d{HTTP/1.1, (http/1.1)}{localhost:41811} 2024-12-16T17:56:26,670 INFO [Time-limited test {}] server.Server(415): Started @3076ms 2024-12-16T17:56:26,720 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 2024-12-16T17:56:27,478 WARN [Thread-72 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/adfbedd3-683f-aafc-4f85-97b20bf2b38d/cluster_489038c2-cc48-2e45-cfe4-52e70c8038f4/dfs/data/data1/current/BP-1899768724-172.17.0.2-1734371785309/current, will proceed with Du for space computation calculation, 2024-12-16T17:56:27,478 WARN [Thread-73 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/adfbedd3-683f-aafc-4f85-97b20bf2b38d/cluster_489038c2-cc48-2e45-cfe4-52e70c8038f4/dfs/data/data2/current/BP-1899768724-172.17.0.2-1734371785309/current, will proceed with Du for space computation calculation, 2024-12-16T17:56:27,502 WARN [Thread-58 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. Assuming default value of -1 2024-12-16T17:56:27,537 DEBUG [Time-limited test {}] hbase.HBaseTestingUtility(703): Setting hbase.rootdir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/adfbedd3-683f-aafc-4f85-97b20bf2b38d 2024-12-16T17:56:27,542 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0xdda8707da84804c0 with lease ID 0x823a0a3edcf8f4e1: Processing first storage report for DS-89ba1694-70fd-483d-b6da-33a13e44fdb5 from datanode DatanodeRegistration(127.0.0.1:41817, datanodeUuid=aed29c79-9217-44a3-9ef6-9f4b96769121, infoPort=38687, infoSecurePort=0, ipcPort=34165, storageInfo=lv=-57;cid=testClusterID;nsid=1049714200;c=1734371785309) 2024-12-16T17:56:27,543 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0xdda8707da84804c0 with lease ID 0x823a0a3edcf8f4e1: from storage DS-89ba1694-70fd-483d-b6da-33a13e44fdb5 node DatanodeRegistration(127.0.0.1:41817, datanodeUuid=aed29c79-9217-44a3-9ef6-9f4b96769121, infoPort=38687, infoSecurePort=0, ipcPort=34165, storageInfo=lv=-57;cid=testClusterID;nsid=1049714200;c=1734371785309), blocks: 0, hasStaleStorage: true, processing time: 1 msecs, invalidatedBlocks: 0 2024-12-16T17:56:27,543 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0xdda8707da84804c0 with lease ID 0x823a0a3edcf8f4e1: Processing first storage report for DS-d245fa7d-b809-48b7-b6a7-7e092eed1859 from datanode DatanodeRegistration(127.0.0.1:41817, datanodeUuid=aed29c79-9217-44a3-9ef6-9f4b96769121, infoPort=38687, infoSecurePort=0, ipcPort=34165, storageInfo=lv=-57;cid=testClusterID;nsid=1049714200;c=1734371785309) 2024-12-16T17:56:27,543 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0xdda8707da84804c0 with lease ID 0x823a0a3edcf8f4e1: from storage DS-d245fa7d-b809-48b7-b6a7-7e092eed1859 node DatanodeRegistration(127.0.0.1:41817, datanodeUuid=aed29c79-9217-44a3-9ef6-9f4b96769121, infoPort=38687, infoSecurePort=0, ipcPort=34165, storageInfo=lv=-57;cid=testClusterID;nsid=1049714200;c=1734371785309), blocks: 0, hasStaleStorage: false, processing time: 0 msecs, 
invalidatedBlocks: 0 2024-12-16T17:56:27,604 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(259): Started connectionTimeout=30000, dir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/adfbedd3-683f-aafc-4f85-97b20bf2b38d/cluster_489038c2-cc48-2e45-cfe4-52e70c8038f4/zookeeper_0, clientPort=49190, secureClientPort=-1, dataDir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/adfbedd3-683f-aafc-4f85-97b20bf2b38d/cluster_489038c2-cc48-2e45-cfe4-52e70c8038f4/zookeeper_0/version-2, dataDirSize=457 dataLogDir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/adfbedd3-683f-aafc-4f85-97b20bf2b38d/cluster_489038c2-cc48-2e45-cfe4-52e70c8038f4/zookeeper_0/version-2, dataLogSize=457 tickTime=2000, maxClientCnxns=300, minSessionTimeout=4000, maxSessionTimeout=40000, clientPortListenBacklog=-1, serverId=0 2024-12-16T17:56:27,615 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(284): Started MiniZooKeeperCluster and ran 'stat' on client port=49190 2024-12-16T17:56:27,628 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-16T17:56:27,633 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-16T17:56:27,846 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41817 is added to blk_1073741825_1001 (size=7) 2024-12-16T17:56:28,259 INFO [Time-limited test {}] util.FSUtils(490): Created version file at hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4 with version=8 2024-12-16T17:56:28,259 INFO [Time-limited test {}] hbase.HBaseTestingUtility(1460): Setting hbase.fs.tmp.dir to hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/hbase-staging 2024-12-16T17:56:28,362 DEBUG [Time-limited test {}] channel.MultithreadEventLoopGroup(44): -Dio.netty.eventLoopThreads: 16 2024-12-16T17:56:28,585 INFO [Time-limited test {}] client.ConnectionUtils(129): master/3609ad07831c:0 server-side Connection retries=45 2024-12-16T17:56:28,600 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-12-16T17:56:28,600 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, maxQueueLength=30, handlerCount=3 2024-12-16T17:56:28,601 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0 2024-12-16T17:56:28,601 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-12-16T17:56:28,601 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1 2024-12-16T17:56:28,718 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating 
org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.MasterService, hbase.pb.RegionServerStatusService, hbase.pb.LockService, hbase.pb.HbckService, hbase.pb.ClientMetaService, hbase.pb.ClientService, hbase.pb.AdminService, hbase.pb.ClientMetaService, hbase.pb.BootstrapNodeService 2024-12-16T17:56:28,765 INFO [Time-limited test {}] metrics.MetricRegistriesLoader(60): Loaded MetricRegistries class org.apache.hadoop.hbase.metrics.impl.MetricRegistriesImpl 2024-12-16T17:56:28,772 DEBUG [Time-limited test {}] util.ClassSize(228): Using Unsafe to estimate memory layout 2024-12-16T17:56:28,775 INFO [Time-limited test {}] ipc.NettyRpcServer(315): Using org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation 2024-12-16T17:56:28,796 DEBUG [Time-limited test {}] channel.DefaultChannelId(84): -Dio.netty.processId: 19824 (auto-detected) 2024-12-16T17:56:28,797 DEBUG [Time-limited test {}] channel.DefaultChannelId(106): -Dio.netty.machineId: 02:42:ac:ff:fe:11:00:02 (auto-detected) 2024-12-16T17:56:28,813 INFO [Time-limited test {}] ipc.NettyRpcServer(197): Bind to /172.17.0.2:38367 2024-12-16T17:56:28,820 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-16T17:56:28,822 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-16T17:56:28,832 INFO [Time-limited test {}] zookeeper.RecoverableZooKeeper(138): Process identifier=master:38367 connecting to ZooKeeper ensemble=127.0.0.1:49190 2024-12-16T17:56:28,926 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:383670x0, quorum=127.0.0.1:49190, baseZNode=/hbase Received ZooKeeper Event, type=None, state=SyncConnected, path=null 2024-12-16T17:56:28,928 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): master:38367-0x1002fe073560000 connected 2024-12-16T17:56:29,005 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): master:38367-0x1002fe073560000, quorum=127.0.0.1:49190, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/master 2024-12-16T17:56:29,011 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): master:38367-0x1002fe073560000, quorum=127.0.0.1:49190, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-12-16T17:56:29,016 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): master:38367-0x1002fe073560000, quorum=127.0.0.1:49190, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/acl 2024-12-16T17:56:29,020 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=38367 2024-12-16T17:56:29,020 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=38367 2024-12-16T17:56:29,021 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=38367 2024-12-16T17:56:29,021 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=38367 2024-12-16T17:56:29,022 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=38367 
2024-12-16T17:56:29,029 INFO [Time-limited test {}] master.HMaster(488): hbase.rootdir=hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4, hbase.cluster.distributed=false 2024-12-16T17:56:29,086 INFO [Time-limited test {}] client.ConnectionUtils(129): regionserver/3609ad07831c:0 server-side Connection retries=45 2024-12-16T17:56:29,086 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-12-16T17:56:29,087 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, maxQueueLength=30, handlerCount=3 2024-12-16T17:56:29,087 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0 2024-12-16T17:56:29,087 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-12-16T17:56:29,087 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1 2024-12-16T17:56:29,089 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.ClientService, hbase.pb.AdminService, hbase.pb.ClientMetaService, hbase.pb.BootstrapNodeService 2024-12-16T17:56:29,091 INFO [Time-limited test {}] ipc.NettyRpcServer(315): Using org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation 2024-12-16T17:56:29,092 INFO [Time-limited test {}] ipc.NettyRpcServer(197): Bind to /172.17.0.2:39733 2024-12-16T17:56:29,093 INFO [Time-limited test {}] hfile.BlockCacheFactory(123): Allocating BlockCache size=880 MB, blockSize=64 KB 2024-12-16T17:56:29,097 DEBUG [Time-limited test {}] mob.MobFileCache(124): MobFileCache enabled with cacheSize=1000, evictPeriods=3600sec, evictRemainRatio=0.5 2024-12-16T17:56:29,098 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-16T17:56:29,101 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-16T17:56:29,103 INFO [Time-limited test {}] zookeeper.RecoverableZooKeeper(138): Process identifier=regionserver:39733 connecting to ZooKeeper ensemble=127.0.0.1:49190 2024-12-16T17:56:29,109 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:397330x0, quorum=127.0.0.1:49190, baseZNode=/hbase Received ZooKeeper Event, type=None, state=SyncConnected, path=null 2024-12-16T17:56:29,110 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:397330x0, quorum=127.0.0.1:49190, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/master 2024-12-16T17:56:29,110 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): regionserver:39733-0x1002fe073560001 connected 2024-12-16T17:56:29,111 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:39733-0x1002fe073560001, quorum=127.0.0.1:49190, 
baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-12-16T17:56:29,112 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:39733-0x1002fe073560001, quorum=127.0.0.1:49190, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/acl 2024-12-16T17:56:29,116 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=39733 2024-12-16T17:56:29,117 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=39733 2024-12-16T17:56:29,118 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=39733 2024-12-16T17:56:29,119 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=39733 2024-12-16T17:56:29,119 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=39733 2024-12-16T17:56:29,122 INFO [master/3609ad07831c:0:becomeActiveMaster {}] master.HMaster(2445): Adding backup master ZNode /hbase/backup-masters/3609ad07831c,38367,1734371788356 2024-12-16T17:56:29,133 DEBUG [M:0;3609ad07831c:38367 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:M:0;3609ad07831c:38367 2024-12-16T17:56:29,135 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:39733-0x1002fe073560001, quorum=127.0.0.1:49190, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-12-16T17:56:29,135 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:38367-0x1002fe073560000, quorum=127.0.0.1:49190, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-12-16T17:56:29,136 DEBUG [master/3609ad07831c:0:becomeActiveMaster {}] zookeeper.ZKUtil(111): master:38367-0x1002fe073560000, quorum=127.0.0.1:49190, baseZNode=/hbase Set watcher on existing znode=/hbase/backup-masters/3609ad07831c,38367,1734371788356 2024-12-16T17:56:29,159 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:39733-0x1002fe073560001, quorum=127.0.0.1:49190, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/master 2024-12-16T17:56:29,159 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:38367-0x1002fe073560000, quorum=127.0.0.1:49190, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/master 2024-12-16T17:56:29,160 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:39733-0x1002fe073560001, quorum=127.0.0.1:49190, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-16T17:56:29,160 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:38367-0x1002fe073560000, quorum=127.0.0.1:49190, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-16T17:56:29,160 DEBUG [master/3609ad07831c:0:becomeActiveMaster {}] zookeeper.ZKUtil(111): master:38367-0x1002fe073560000, quorum=127.0.0.1:49190, baseZNode=/hbase Set watcher on existing znode=/hbase/master 2024-12-16T17:56:29,161 DEBUG [zk-event-processor-pool-0 {}] 
zookeeper.ZKUtil(111): master:38367-0x1002fe073560000, quorum=127.0.0.1:49190, baseZNode=/hbase Set watcher on existing znode=/hbase/master 2024-12-16T17:56:29,161 INFO [master/3609ad07831c:0:becomeActiveMaster {}] master.ActiveMasterManager(245): Deleting ZNode for /hbase/backup-masters/3609ad07831c,38367,1734371788356 from backup master directory 2024-12-16T17:56:29,168 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:38367-0x1002fe073560000, quorum=127.0.0.1:49190, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/backup-masters/3609ad07831c,38367,1734371788356 2024-12-16T17:56:29,168 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:39733-0x1002fe073560001, quorum=127.0.0.1:49190, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-12-16T17:56:29,168 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:38367-0x1002fe073560000, quorum=127.0.0.1:49190, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-12-16T17:56:29,169 WARN [master/3609ad07831c:0:becomeActiveMaster {}] hbase.ZNodeClearer(69): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!) 2024-12-16T17:56:29,169 INFO [master/3609ad07831c:0:becomeActiveMaster {}] master.ActiveMasterManager(255): Registered as active master=3609ad07831c,38367,1734371788356 2024-12-16T17:56:29,171 INFO [master/3609ad07831c:0:becomeActiveMaster {}] regionserver.ChunkCreator(488): Allocating data MemStoreChunkPool with chunk size 2 MB, max count 396, initial count 0 2024-12-16T17:56:29,172 INFO [master/3609ad07831c:0:becomeActiveMaster {}] regionserver.ChunkCreator(488): Allocating index MemStoreChunkPool with chunk size 204.80 KB, max count 440, initial count 0 2024-12-16T17:56:29,226 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41817 is added to blk_1073741826_1002 (size=42) 2024-12-16T17:56:29,634 DEBUG [master/3609ad07831c:0:becomeActiveMaster {}] util.FSUtils(639): Created cluster ID file at hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/hbase.id with ID: 212c0433-1ce0-4906-af52-f78f31888244 2024-12-16T17:56:29,678 INFO [master/3609ad07831c:0:becomeActiveMaster {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-16T17:56:29,709 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:39733-0x1002fe073560001, quorum=127.0.0.1:49190, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-16T17:56:29,709 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:38367-0x1002fe073560000, quorum=127.0.0.1:49190, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-16T17:56:29,728 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41817 is added to blk_1073741827_1003 (size=196) 2024-12-16T17:56:30,152 INFO [master/3609ad07831c:0:becomeActiveMaster {}] region.MasterRegion(372): Create or load local region for table 'master:store', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME 
=> 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-12-16T17:56:30,154 INFO [master/3609ad07831c:0:becomeActiveMaster {}] region.MasterRegionFlusherAndCompactor(132): Injected flushSize=134217728, flushPerChanges=1000000, flushIntervalMs=900000 2024-12-16T17:56:30,168 DEBUG [master/3609ad07831c:0:becomeActiveMaster {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(244): No decryptEncryptedDataEncryptionKey method in DFSClient, should be hadoop version with HDFS-12396 java.lang.NoSuchMethodException: org.apache.hadoop.hdfs.DFSClient.decryptEncryptedDataEncryptionKey(org.apache.hadoop.fs.FileEncryptionInfo) at java.lang.Class.getDeclaredMethod(Class.java:2675) ~[?:?] at org.apache.hadoop.hbase.io.asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper.createTransparentCryptoHelperWithoutHDFS12396(FanOutOneBlockAsyncDFSOutputSaslHelper.java:183) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.io.asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper.createTransparentCryptoHelper(FanOutOneBlockAsyncDFSOutputSaslHelper.java:242) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.io.asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper.<clinit>(FanOutOneBlockAsyncDFSOutputSaslHelper.java:253) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at java.lang.Class.forName0(Native Method) ~[?:?] at java.lang.Class.forName(Class.java:375) ~[?:?] at org.apache.hadoop.hbase.wal.AsyncFSWALProvider.load(AsyncFSWALProvider.java:147) ~[classes/:?] at org.apache.hadoop.hbase.wal.WALFactory.getProviderClass(WALFactory.java:160) ~[classes/:?] at org.apache.hadoop.hbase.wal.WALFactory.getProvider(WALFactory.java:200) ~[classes/:?] at org.apache.hadoop.hbase.wal.WALFactory.<init>(WALFactory.java:232) ~[classes/:?] at org.apache.hadoop.hbase.wal.WALFactory.<init>(WALFactory.java:207) ~[classes/:?] at org.apache.hadoop.hbase.master.region.MasterRegion.create(MasterRegion.java:402) ~[classes/:?] at org.apache.hadoop.hbase.master.region.MasterRegionFactory.create(MasterRegionFactory.java:135) ~[classes/:?] at org.apache.hadoop.hbase.master.HMaster.finishActiveMasterInitialization(HMaster.java:973) ~[classes/:?] at org.apache.hadoop.hbase.master.HMaster.startActiveMasterManager(HMaster.java:2470) ~[classes/:?] at org.apache.hadoop.hbase.master.HMaster.lambda$run$0(HMaster.java:590) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.HMaster.lambda$run$1(HMaster.java:587) ~[classes/:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-16T17:56:30,171 INFO [master/3609ad07831c:0:becomeActiveMaster {}] wal.WALFactory(183): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.AsyncFSWALProvider 2024-12-16T17:56:30,196 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41817 is added to blk_1073741828_1004 (size=1189) 2024-12-16T17:56:30,620 INFO [master/3609ad07831c:0:becomeActiveMaster {}] regionserver.HRegion(7124): Creating {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''}, tableDescriptor='master:store', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, under table dir hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/MasterData/data/master/store 2024-12-16T17:56:30,632 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41817 is added to blk_1073741829_1005 (size=34) 2024-12-16T17:56:31,039 INFO [master/3609ad07831c:0:becomeActiveMaster {}] throttle.StoreHotnessProtector(112): StoreHotnessProtector is disabled. Set hbase.region.store.parallel.put.limit > 0 to enable, which may help mitigate load under heavy write pressure. 
2024-12-16T17:56:31,040 DEBUG [master/3609ad07831c:0:becomeActiveMaster {}] regionserver.HRegion(894): Instantiated master:store,,1.1595e783b53d99cd5eef43b6debb2682.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-16T17:56:31,042 DEBUG [master/3609ad07831c:0:becomeActiveMaster {}] regionserver.HRegion(1681): Closing 1595e783b53d99cd5eef43b6debb2682, disabling compactions & flushes 2024-12-16T17:56:31,042 INFO [master/3609ad07831c:0:becomeActiveMaster {}] regionserver.HRegion(1703): Closing region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-12-16T17:56:31,042 DEBUG [master/3609ad07831c:0:becomeActiveMaster {}] regionserver.HRegion(1724): Waiting without time limit for close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-12-16T17:56:31,042 DEBUG [master/3609ad07831c:0:becomeActiveMaster {}] regionserver.HRegion(1791): Acquired close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. after waiting 0 ms 2024-12-16T17:56:31,042 DEBUG [master/3609ad07831c:0:becomeActiveMaster {}] regionserver.HRegion(1801): Updates disabled for region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-12-16T17:56:31,043 INFO [master/3609ad07831c:0:becomeActiveMaster {}] regionserver.HRegion(1922): Closed master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-12-16T17:56:31,043 DEBUG [master/3609ad07831c:0:becomeActiveMaster {}] regionserver.HRegion(1635): Region close journal for 1595e783b53d99cd5eef43b6debb2682: 2024-12-16T17:56:31,046 WARN [master/3609ad07831c:0:becomeActiveMaster {}] region.MasterRegion(249): failed to clean up initializing flag: hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/MasterData/data/master/store/.initializing 2024-12-16T17:56:31,046 DEBUG [master/3609ad07831c:0:becomeActiveMaster {}] region.MasterRegion(219): WALDir=hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/MasterData/WALs/3609ad07831c,38367,1734371788356 2024-12-16T17:56:31,052 INFO [master/3609ad07831c:0:becomeActiveMaster {}] monitor.StreamSlowMonitor(122): New stream slow monitor defaultMonitorName 2024-12-16T17:56:31,062 INFO [master/3609ad07831c:0:becomeActiveMaster {}] wal.AbstractFSWAL(500): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=3609ad07831c%2C38367%2C1734371788356, suffix=, logDir=hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/MasterData/WALs/3609ad07831c,38367,1734371788356, archiveDir=hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/MasterData/oldWALs, maxLogs=10 2024-12-16T17:56:31,079 DEBUG [master/3609ad07831c:0:becomeActiveMaster {}] asyncfs.FanOutOneBlockAsyncDFSOutputHelper(617): When create output stream for /user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/MasterData/WALs/3609ad07831c,38367,1734371788356/3609ad07831c%2C38367%2C1734371788356.1734371791066, exclude list is [], retry=0 2024-12-16T17:56:31,094 DEBUG [RS-EventLoopGroup-3-2 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = 127.0.0.1/127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:41817,DS-89ba1694-70fd-483d-b6da-33a13e44fdb5,DISK] 2024-12-16T17:56:31,096 DEBUG [RS-EventLoopGroup-3-2 {}] asyncfs.ProtobufDecoder(117): Hadoop 3.3 and above shades protobuf. 
2024-12-16T17:56:31,127 INFO [master/3609ad07831c:0:becomeActiveMaster {}] wal.AbstractFSWAL(841): New WAL /user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/MasterData/WALs/3609ad07831c,38367,1734371788356/3609ad07831c%2C38367%2C1734371788356.1734371791066
2024-12-16T17:56:31,128 DEBUG [master/3609ad07831c:0:becomeActiveMaster {}] wal.AbstractFSWAL(925): Create new AsyncFSWAL writer with pipeline: [(127.0.0.1/127.0.0.1:38687:38687)]
2024-12-16T17:56:31,129 DEBUG [master/3609ad07831c:0:becomeActiveMaster {}] regionserver.HRegion(7285): Opening region: {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''}
2024-12-16T17:56:31,129 DEBUG [master/3609ad07831c:0:becomeActiveMaster {}] regionserver.HRegion(894): Instantiated master:store,,1.1595e783b53d99cd5eef43b6debb2682.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable
2024-12-16T17:56:31,133 DEBUG [master/3609ad07831c:0:becomeActiveMaster {}] regionserver.HRegion(7327): checking encryption for 1595e783b53d99cd5eef43b6debb2682
2024-12-16T17:56:31,134 DEBUG [master/3609ad07831c:0:becomeActiveMaster {}] regionserver.HRegion(7330): checking classloading for 1595e783b53d99cd5eef43b6debb2682
2024-12-16T17:56:31,167 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1595e783b53d99cd5eef43b6debb2682
2024-12-16T17:56:31,187 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName info
2024-12-16T17:56:31,189 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-12-16T17:56:31,192 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE
2024-12-16T17:56:31,192 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family proc of region 1595e783b53d99cd5eef43b6debb2682
2024-12-16T17:56:31,196 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName proc
2024-12-16T17:56:31,196 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-12-16T17:56:31,197 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/proc, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE
2024-12-16T17:56:31,197 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rs of region 1595e783b53d99cd5eef43b6debb2682
2024-12-16T17:56:31,200 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName rs
2024-12-16T17:56:31,200 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-12-16T17:56:31,201 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/rs, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE
2024-12-16T17:56:31,201 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family state of region 1595e783b53d99cd5eef43b6debb2682
2024-12-16T17:56:31,204 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName state
2024-12-16T17:56:31,204 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-12-16T17:56:31,205 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/state, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE
2024-12-16T17:56:31,209 DEBUG [master/3609ad07831c:0:becomeActiveMaster {}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682
2024-12-16T17:56:31,210 DEBUG [master/3609ad07831c:0:becomeActiveMaster {}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682
2024-12-16T17:56:31,218 DEBUG [master/3609ad07831c:0:becomeActiveMaster {}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table master:store descriptor;using region.getMemStoreFlushHeapSize/# of families (32.0 M)) instead.
2024-12-16T17:56:31,222 DEBUG [master/3609ad07831c:0:becomeActiveMaster {}] regionserver.HRegion(1085): writing seq id for 1595e783b53d99cd5eef43b6debb2682
2024-12-16T17:56:31,227 DEBUG [master/3609ad07831c:0:becomeActiveMaster {}] wal.WALSplitUtil(409): Wrote file=hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1
2024-12-16T17:56:31,228 INFO [master/3609ad07831c:0:becomeActiveMaster {}] regionserver.HRegion(1102): Opened 1595e783b53d99cd5eef43b6debb2682; next sequenceid=2; ConstantSizeRegionSplitPolicy{desiredMaxFileSize=63801293, jitterRate=-0.049286648631095886}, FlushLargeStoresPolicy{flushSizeLowerBound=33554432}
2024-12-16T17:56:31,232 DEBUG [master/3609ad07831c:0:becomeActiveMaster {}] regionserver.HRegion(1001): Region open journal for 1595e783b53d99cd5eef43b6debb2682:
2024-12-16T17:56:31,233 INFO [master/3609ad07831c:0:becomeActiveMaster {}] region.MasterRegionFlusherAndCompactor(122): Constructor flushSize=134217728, flushPerChanges=1000000, flushIntervalMs=900000, compactMin=4
2024-12-16T17:56:31,256 DEBUG [master/3609ad07831c:0:becomeActiveMaster {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@769bc9a6, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null
2024-12-16T17:56:31,283 INFO [master/3609ad07831c:0:becomeActiveMaster {}] master.HMaster(882): No meta location available on zookeeper, skip migrating...
2024-12-16T17:56:31,292 INFO [master/3609ad07831c:0:becomeActiveMaster {}] region.RegionProcedureStore(104): Starting the Region Procedure Store, number threads=5
2024-12-16T17:56:31,292 INFO [master/3609ad07831c:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(633): Starting 5 core workers (bigger of cpus/4 or 16) with max (burst) worker count=50
2024-12-16T17:56:31,294 INFO [master/3609ad07831c:0:becomeActiveMaster {}] region.RegionProcedureStore(255): Starting Region Procedure Store lease recovery...
2024-12-16T17:56:31,295 INFO [master/3609ad07831c:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(653): Recovered RegionProcedureStore lease in 1 msec
2024-12-16T17:56:31,300 INFO [master/3609ad07831c:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(667): Loaded RegionProcedureStore in 4 msec
2024-12-16T17:56:31,300 INFO [master/3609ad07831c:0:becomeActiveMaster {}] procedure2.RemoteProcedureDispatcher(96): Instantiated, coreThreads=3 (allowCoreThreadTimeOut=true), queueMaxSize=32, operationDelay=150
2024-12-16T17:56:31,322 INFO [master/3609ad07831c:0:becomeActiveMaster {}] master.RegionServerTracker(127): Upgrading RegionServerTracker to active master mode; 0 have existingServerCrashProcedures, 0 possibly 'live' servers, and 0 'splitting'.
2024-12-16T17:56:31,332 DEBUG [master/3609ad07831c:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:38367-0x1002fe073560000, quorum=127.0.0.1:49190, baseZNode=/hbase Unable to get data of znode /hbase/balancer because node does not exist (not necessarily an error)
2024-12-16T17:56:31,435 DEBUG [master/3609ad07831c:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(215): Node /hbase/balancer already deleted, retry=false
2024-12-16T17:56:31,442 INFO [master/3609ad07831c:0:becomeActiveMaster {}] normalizer.SimpleRegionNormalizer(163): Updated configuration for key 'hbase.normalizer.merge.min_region_size.mb' from 0 to 1
2024-12-16T17:56:31,445 DEBUG [master/3609ad07831c:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:38367-0x1002fe073560000, quorum=127.0.0.1:49190, baseZNode=/hbase Unable to get data of znode /hbase/normalizer because node does not exist (not necessarily an error)
2024-12-16T17:56:31,451 DEBUG [master/3609ad07831c:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(215): Node /hbase/normalizer already deleted, retry=false
2024-12-16T17:56:31,452 INFO [master/3609ad07831c:0:becomeActiveMaster {}] normalizer.RegionNormalizerWorker(137): Normalizer rate limit set to unlimited
2024-12-16T17:56:31,455 DEBUG [master/3609ad07831c:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:38367-0x1002fe073560000, quorum=127.0.0.1:49190, baseZNode=/hbase Unable to get data of znode /hbase/switch/split because node does not exist (not necessarily an error)
2024-12-16T17:56:31,459 DEBUG [master/3609ad07831c:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(215): Node /hbase/switch/split already deleted, retry=false
2024-12-16T17:56:31,460 DEBUG [master/3609ad07831c:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:38367-0x1002fe073560000, quorum=127.0.0.1:49190, baseZNode=/hbase Unable to get data of znode /hbase/switch/merge because node does not exist (not necessarily an error)
2024-12-16T17:56:31,467 DEBUG [master/3609ad07831c:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(215): Node /hbase/switch/merge already deleted, retry=false
2024-12-16T17:56:31,478 DEBUG [master/3609ad07831c:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:38367-0x1002fe073560000, quorum=127.0.0.1:49190, baseZNode=/hbase Unable to get data of znode /hbase/snapshot-cleanup because node does not exist (not necessarily an error)
2024-12-16T17:56:31,484 DEBUG [master/3609ad07831c:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(215): Node /hbase/snapshot-cleanup already deleted, retry=false
2024-12-16T17:56:31,493 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:39733-0x1002fe073560001, quorum=127.0.0.1:49190, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/running
2024-12-16T17:56:31,493 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:38367-0x1002fe073560000, quorum=127.0.0.1:49190, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/running
2024-12-16T17:56:31,493 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:39733-0x1002fe073560001, quorum=127.0.0.1:49190, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase
2024-12-16T17:56:31,493 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:38367-0x1002fe073560000, quorum=127.0.0.1:49190, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase
2024-12-16T17:56:31,494 INFO [master/3609ad07831c:0:becomeActiveMaster {}] master.HMaster(826): Active/primary master=3609ad07831c,38367,1734371788356, sessionid=0x1002fe073560000, setting cluster-up flag (Was=false)
2024-12-16T17:56:31,517 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:38367-0x1002fe073560000, quorum=127.0.0.1:49190, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase
2024-12-16T17:56:31,517 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:39733-0x1002fe073560001, quorum=127.0.0.1:49190, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase
2024-12-16T17:56:31,543 DEBUG [master/3609ad07831c:0:becomeActiveMaster {}] procedure.ZKProcedureUtil(251): Clearing all znodes /hbase/flush-table-proc/acquired, /hbase/flush-table-proc/reached, /hbase/flush-table-proc/abort
2024-12-16T17:56:31,546 DEBUG [master/3609ad07831c:0:becomeActiveMaster {}] procedure.ZKProcedureCoordinator(245): Starting controller for procedure member=3609ad07831c,38367,1734371788356
2024-12-16T17:56:31,568 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:39733-0x1002fe073560001, quorum=127.0.0.1:49190, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase
2024-12-16T17:56:31,568 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:38367-0x1002fe073560000, quorum=127.0.0.1:49190, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase
2024-12-16T17:56:31,593 DEBUG [master/3609ad07831c:0:becomeActiveMaster {}] procedure.ZKProcedureUtil(251): Clearing all znodes /hbase/online-snapshot/acquired, /hbase/online-snapshot/reached, /hbase/online-snapshot/abort
2024-12-16T17:56:31,595 DEBUG [master/3609ad07831c:0:becomeActiveMaster {}] procedure.ZKProcedureCoordinator(245): Starting controller for procedure member=3609ad07831c,38367,1734371788356
2024-12-16T17:56:31,635 DEBUG [RS:0;3609ad07831c:39733 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:RS:0;3609ad07831c:39733
2024-12-16T17:56:31,637 INFO [RS:0;3609ad07831c:39733 {}] regionserver.HRegionServer(1008): ClusterId : 212c0433-1ce0-4906-af52-f78f31888244
2024-12-16T17:56:31,639 DEBUG [RS:0;3609ad07831c:39733 {}] procedure.RegionServerProcedureManagerHost(43): Procedure flush-table-proc initializing
2024-12-16T17:56:31,651 DEBUG [RS:0;3609ad07831c:39733 {}] procedure.RegionServerProcedureManagerHost(45): Procedure flush-table-proc initialized
2024-12-16T17:56:31,651 DEBUG [RS:0;3609ad07831c:39733 {}] procedure.RegionServerProcedureManagerHost(43): Procedure online-snapshot initializing
2024-12-16T17:56:31,660 DEBUG [RS:0;3609ad07831c:39733 {}] procedure.RegionServerProcedureManagerHost(45): Procedure online-snapshot initialized
2024-12-16T17:56:31,661 DEBUG [RS:0;3609ad07831c:39733 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@2c057a30, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null
2024-12-16T17:56:31,663 DEBUG [RS:0;3609ad07831c:39733 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@803b35e, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=3609ad07831c/172.17.0.2:0
2024-12-16T17:56:31,666 INFO [RS:0;3609ad07831c:39733 {}] regionserver.RegionServerCoprocessorHost(67): System coprocessor loading is enabled
2024-12-16T17:56:31,667 INFO [RS:0;3609ad07831c:39733 {}] regionserver.RegionServerCoprocessorHost(68): Table coprocessor loading is enabled
2024-12-16T17:56:31,667 DEBUG [RS:0;3609ad07831c:39733 {}] regionserver.HRegionServer(1090): About to register with Master.
2024-12-16T17:56:31,669 INFO [RS:0;3609ad07831c:39733 {}] regionserver.HRegionServer(3073): reportForDuty to master=3609ad07831c,38367,1734371788356 with isa=3609ad07831c/172.17.0.2:39733, startcode=1734371789085
2024-12-16T17:56:31,669 DEBUG [master/3609ad07831c:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(1098): Stored pid=1, state=RUNNABLE:INIT_META_WRITE_FS_LAYOUT; InitMetaProcedure table=hbase:meta
2024-12-16T17:56:31,674 INFO [master/3609ad07831c:0:becomeActiveMaster {}] balancer.BaseLoadBalancer(575): slop=0.2
2024-12-16T17:56:31,677 INFO [master/3609ad07831c:0:becomeActiveMaster {}] balancer.StochasticLoadBalancer(294): Loaded config; maxSteps=1000000, runMaxSteps=false, stepsPerRegion=800, maxRunningTime=30000, isByTable=false, CostFunctions=[RegionCountSkewCostFunction, PrimaryRegionCountSkewCostFunction, MoveCostFunction, ServerLocalityCostFunction, RackLocalityCostFunction, TableSkewCostFunction, RegionReplicaHostCostFunction, RegionReplicaRackCostFunction, ReadRequestCostFunction, WriteRequestCostFunction, MemStoreSizeCostFunction, StoreFileCostFunction] , sum of multiplier of cost functions = 0.0 etc.
2024-12-16T17:56:31,679 DEBUG [RS:0;3609ad07831c:39733 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=RegionServerStatusService, sasl=false
2024-12-16T17:56:31,681 DEBUG [master/3609ad07831c:0:becomeActiveMaster {}] balancer.RegionLocationFinder(146): Skipping locality-based refresh due to oldStatus=null, newStatus=Master: 3609ad07831c,38367,1734371788356 Number of backup masters: 0 Number of live region servers: 0 Number of dead region servers: 0 Number of unknown region servers: 0 Average load: 0.0 Number of requests: 0 Number of regions: 0 Number of regions in transition: 0
2024-12-16T17:56:31,684 DEBUG [master/3609ad07831c:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_OPEN_REGION-master/3609ad07831c:0, corePoolSize=5, maxPoolSize=5
2024-12-16T17:56:31,684 DEBUG [master/3609ad07831c:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_CLOSE_REGION-master/3609ad07831c:0, corePoolSize=5, maxPoolSize=5
2024-12-16T17:56:31,684 DEBUG [master/3609ad07831c:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_SERVER_OPERATIONS-master/3609ad07831c:0, corePoolSize=5, maxPoolSize=5
2024-12-16T17:56:31,685 DEBUG [master/3609ad07831c:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_META_SERVER_OPERATIONS-master/3609ad07831c:0, corePoolSize=5, maxPoolSize=5
2024-12-16T17:56:31,685 DEBUG [master/3609ad07831c:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=M_LOG_REPLAY_OPS-master/3609ad07831c:0, corePoolSize=10, maxPoolSize=10
2024-12-16T17:56:31,685 DEBUG [master/3609ad07831c:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_SNAPSHOT_OPERATIONS-master/3609ad07831c:0, corePoolSize=1, maxPoolSize=1
2024-12-16T17:56:31,685 DEBUG [master/3609ad07831c:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_MERGE_OPERATIONS-master/3609ad07831c:0, corePoolSize=2, maxPoolSize=2
2024-12-16T17:56:31,685 DEBUG [master/3609ad07831c:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_TABLE_OPERATIONS-master/3609ad07831c:0, corePoolSize=1, maxPoolSize=1
2024-12-16T17:56:31,686 INFO [master/3609ad07831c:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(81): ADDED pid=-1, state=WAITING_TIMEOUT; org.apache.hadoop.hbase.procedure2.CompletedProcedureCleaner; timeout=30000, timestamp=1734371821686
2024-12-16T17:56:31,688 INFO [master/3609ad07831c:0:becomeActiveMaster {}] cleaner.DirScanPool(74): log_cleaner Cleaner pool size is 1
2024-12-16T17:56:31,689 INFO [master/3609ad07831c:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveLogCleaner
2024-12-16T17:56:31,690 DEBUG [PEWorker-1 {}] procedure.InitMetaProcedure(96): Execute pid=1, state=RUNNABLE:INIT_META_WRITE_FS_LAYOUT, locked=true; InitMetaProcedure table=hbase:meta
2024-12-16T17:56:31,691 INFO [PEWorker-1 {}] procedure.InitMetaProcedure(75): BOOTSTRAP: creating hbase:meta region
2024-12-16T17:56:31,692 INFO [master/3609ad07831c:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.replication.master.ReplicationLogCleaner
2024-12-16T17:56:31,692 INFO [master/3609ad07831c:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveMasterLocalStoreWALCleaner
2024-12-16T17:56:31,693 INFO [master/3609ad07831c:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveProcedureWALCleaner
2024-12-16T17:56:31,693 INFO [master/3609ad07831c:0:becomeActiveMaster {}] cleaner.LogCleaner(148): Creating 1 old WALs cleaner threads
2024-12-16T17:56:31,693 INFO [master/3609ad07831c:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=LogsCleaner, period=600000, unit=MILLISECONDS is enabled.
2024-12-16T17:56:31,696 INFO [master/3609ad07831c:0:becomeActiveMaster {}] cleaner.DirScanPool(74): hfile_cleaner Cleaner pool size is 2
2024-12-16T17:56:31,696 DEBUG [PEWorker-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-12-16T17:56:31,697 INFO [PEWorker-1 {}] util.FSTableDescriptors(133): Creating new hbase:meta table descriptor 'hbase:meta', {TABLE_ATTRIBUTES => {IS_META => 'true', coprocessor$1 => '|org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint|536870911|', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}
2024-12-16T17:56:31,697 INFO [master/3609ad07831c:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveMasterLocalStoreHFileCleaner
2024-12-16T17:56:31,698 INFO [master/3609ad07831c:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.HFileLinkCleaner
2024-12-16T17:56:31,700 INFO [master/3609ad07831c:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.snapshot.SnapshotHFileCleaner
2024-12-16T17:56:31,700 INFO [master/3609ad07831c:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveHFileCleaner
2024-12-16T17:56:31,704 DEBUG [master/3609ad07831c:0:becomeActiveMaster {}] cleaner.HFileCleaner(260): Starting for large file=Thread[master/3609ad07831c:0:becomeActiveMaster-HFileCleaner.large.0-1734371791704,5,FailOnTimeoutGroup]
2024-12-16T17:56:31,708 DEBUG [master/3609ad07831c:0:becomeActiveMaster {}] cleaner.HFileCleaner(275): Starting for small files=Thread[master/3609ad07831c:0:becomeActiveMaster-HFileCleaner.small.0-1734371791705,5,FailOnTimeoutGroup]
2024-12-16T17:56:31,709 INFO [master/3609ad07831c:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=HFileCleaner, period=600000, unit=MILLISECONDS is enabled.
2024-12-16T17:56:31,709 INFO [master/3609ad07831c:0:becomeActiveMaster {}] master.HMaster(1680): Reopening regions with very high storeFileRefCount is disabled. Provide threshold value > 0 for hbase.regions.recovery.store.file.ref.count to enable it.
2024-12-16T17:56:31,710 INFO [master/3609ad07831c:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationBarrierCleaner, period=43200000, unit=MILLISECONDS is enabled.
2024-12-16T17:56:31,711 INFO [master/3609ad07831c:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=SnapshotCleaner, period=1800000, unit=MILLISECONDS is enabled.
2024-12-16T17:56:31,715 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41817 is added to blk_1073741831_1007 (size=1039)
2024-12-16T17:56:31,717 INFO [RS-EventLoopGroup-1-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:36235, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins.hfs.0 (auth:SIMPLE), service=RegionServerStatusService
2024-12-16T17:56:31,723 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=38367 {}] master.ServerManager(332): Checking decommissioned status of RegionServer 3609ad07831c,39733,1734371789085
2024-12-16T17:56:31,725 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=38367 {}] master.ServerManager(486): Registering regionserver=3609ad07831c,39733,1734371789085
2024-12-16T17:56:31,738 DEBUG [RS:0;3609ad07831c:39733 {}] regionserver.HRegionServer(1725): Config from master: hbase.rootdir=hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4
2024-12-16T17:56:31,738 DEBUG [RS:0;3609ad07831c:39733 {}] regionserver.HRegionServer(1725): Config from master: fs.defaultFS=hdfs://localhost:40431
2024-12-16T17:56:31,738 DEBUG [RS:0;3609ad07831c:39733 {}] regionserver.HRegionServer(1725): Config from master: hbase.master.info.port=-1
2024-12-16T17:56:31,751 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:38367-0x1002fe073560000, quorum=127.0.0.1:49190, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs
2024-12-16T17:56:31,751 DEBUG [RS:0;3609ad07831c:39733 {}] zookeeper.ZKUtil(111): regionserver:39733-0x1002fe073560001, quorum=127.0.0.1:49190, baseZNode=/hbase Set watcher on existing znode=/hbase/rs/3609ad07831c,39733,1734371789085
2024-12-16T17:56:31,752 WARN [RS:0;3609ad07831c:39733 {}] hbase.ZNodeClearer(69): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!)
2024-12-16T17:56:31,752 INFO [RS:0;3609ad07831c:39733 {}] wal.WALFactory(183): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.AsyncFSWALProvider
2024-12-16T17:56:31,752 DEBUG [RS:0;3609ad07831c:39733 {}] regionserver.HRegionServer(2100): logDir=hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/WALs/3609ad07831c,39733,1734371789085
2024-12-16T17:56:31,754 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(179): RegionServer ephemeral node created, adding [3609ad07831c,39733,1734371789085]
2024-12-16T17:56:31,764 DEBUG [RS:0;3609ad07831c:39733 {}] regionserver.Replication(140): Replication stats-in-log period=300 seconds
2024-12-16T17:56:31,773 INFO [RS:0;3609ad07831c:39733 {}] regionserver.MetricsRegionServerWrapperImpl(120): Computing regionserver metrics every 5000 milliseconds
2024-12-16T17:56:31,784 INFO [RS:0;3609ad07831c:39733 {}] regionserver.MemStoreFlusher(130): globalMemStoreLimit=880 M, globalMemStoreLimitLowMark=836 M, Offheap=false
2024-12-16T17:56:31,787 INFO [RS:0;3609ad07831c:39733 {}] throttle.PressureAwareCompactionThroughputController(131): Compaction throughput configurations, higher bound: 100.00 MB/second, lower bound 50.00 MB/second, off peak: unlimited, tuning period: 60000 ms
2024-12-16T17:56:31,787 INFO [RS:0;3609ad07831c:39733 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS is enabled.
2024-12-16T17:56:31,788 INFO [RS:0;3609ad07831c:39733 {}] regionserver.HRegionServer$CompactionChecker(1988): CompactionChecker runs every PT1S
2024-12-16T17:56:31,794 INFO [RS:0;3609ad07831c:39733 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactedHFilesCleaner, period=120000, unit=MILLISECONDS is enabled.
2024-12-16T17:56:31,794 DEBUG [RS:0;3609ad07831c:39733 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_REGION-regionserver/3609ad07831c:0, corePoolSize=1, maxPoolSize=1
2024-12-16T17:56:31,794 DEBUG [RS:0;3609ad07831c:39733 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_META-regionserver/3609ad07831c:0, corePoolSize=1, maxPoolSize=1
2024-12-16T17:56:31,794 DEBUG [RS:0;3609ad07831c:39733 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_PRIORITY_REGION-regionserver/3609ad07831c:0, corePoolSize=1, maxPoolSize=1
2024-12-16T17:56:31,794 DEBUG [RS:0;3609ad07831c:39733 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_REGION-regionserver/3609ad07831c:0, corePoolSize=1, maxPoolSize=1
2024-12-16T17:56:31,795 DEBUG [RS:0;3609ad07831c:39733 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_META-regionserver/3609ad07831c:0, corePoolSize=1, maxPoolSize=1
2024-12-16T17:56:31,795 DEBUG [RS:0;3609ad07831c:39733 {}] executor.ExecutorService(95): Starting executor service name=RS_LOG_REPLAY_OPS-regionserver/3609ad07831c:0, corePoolSize=2, maxPoolSize=2
2024-12-16T17:56:31,795 DEBUG [RS:0;3609ad07831c:39733 {}] executor.ExecutorService(95): Starting executor service name=RS_COMPACTED_FILES_DISCHARGER-regionserver/3609ad07831c:0, corePoolSize=1, maxPoolSize=1
2024-12-16T17:56:31,795 DEBUG [RS:0;3609ad07831c:39733 {}] executor.ExecutorService(95): Starting executor service name=RS_REGION_REPLICA_FLUSH_OPS-regionserver/3609ad07831c:0, corePoolSize=1, maxPoolSize=1
2024-12-16T17:56:31,795 DEBUG [RS:0;3609ad07831c:39733 {}] executor.ExecutorService(95): Starting executor service name=RS_REFRESH_PEER-regionserver/3609ad07831c:0, corePoolSize=1, maxPoolSize=1
2024-12-16T17:56:31,795 DEBUG [RS:0;3609ad07831c:39733 {}] executor.ExecutorService(95): Starting executor service name=RS_SWITCH_RPC_THROTTLE-regionserver/3609ad07831c:0, corePoolSize=1, maxPoolSize=1
2024-12-16T17:56:31,795 DEBUG [RS:0;3609ad07831c:39733 {}] executor.ExecutorService(95): Starting executor service name=RS_CLAIM_REPLICATION_QUEUE-regionserver/3609ad07831c:0, corePoolSize=1, maxPoolSize=1
2024-12-16T17:56:31,795 DEBUG [RS:0;3609ad07831c:39733 {}] executor.ExecutorService(95): Starting executor service name=RS_SNAPSHOT_OPERATIONS-regionserver/3609ad07831c:0, corePoolSize=3, maxPoolSize=3
2024-12-16T17:56:31,796 DEBUG [RS:0;3609ad07831c:39733 {}] executor.ExecutorService(95): Starting executor service name=RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0, corePoolSize=3, maxPoolSize=3
2024-12-16T17:56:31,796 INFO [RS:0;3609ad07831c:39733 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionChecker, period=1000, unit=MILLISECONDS is enabled.
2024-12-16T17:56:31,796 INFO [RS:0;3609ad07831c:39733 {}] hbase.ChoreService(168): Chore ScheduledChore name=MemstoreFlusherChore, period=1000, unit=MILLISECONDS is enabled.
2024-12-16T17:56:31,797 INFO [RS:0;3609ad07831c:39733 {}] hbase.ChoreService(168): Chore ScheduledChore name=nonceCleaner, period=360000, unit=MILLISECONDS is enabled.
2024-12-16T17:56:31,797 INFO [RS:0;3609ad07831c:39733 {}] hbase.ChoreService(168): Chore ScheduledChore name=BrokenStoreFileCleaner, period=21600000, unit=MILLISECONDS is enabled.
2024-12-16T17:56:31,797 INFO [RS:0;3609ad07831c:39733 {}] hbase.ChoreService(168): Chore ScheduledChore name=3609ad07831c,39733,1734371789085-MobFileCleanerChore, period=86400, unit=SECONDS is enabled.
2024-12-16T17:56:31,816 INFO [RS:0;3609ad07831c:39733 {}] regionserver.HeapMemoryManager(209): Starting, tuneOn=false
2024-12-16T17:56:31,818 INFO [RS:0;3609ad07831c:39733 {}] hbase.ChoreService(168): Chore ScheduledChore name=3609ad07831c,39733,1734371789085-HeapMemoryTunerChore, period=60000, unit=MILLISECONDS is enabled.
2024-12-16T17:56:31,835 INFO [RS:0;3609ad07831c:39733 {}] regionserver.Replication(204): 3609ad07831c,39733,1734371789085 started
2024-12-16T17:56:31,836 INFO [RS:0;3609ad07831c:39733 {}] regionserver.HRegionServer(1767): Serving as 3609ad07831c,39733,1734371789085, RpcServer on 3609ad07831c/172.17.0.2:39733, sessionid=0x1002fe073560001
2024-12-16T17:56:31,837 DEBUG [RS:0;3609ad07831c:39733 {}] procedure.RegionServerProcedureManagerHost(51): Procedure flush-table-proc starting
2024-12-16T17:56:31,837 DEBUG [RS:0;3609ad07831c:39733 {}] flush.RegionServerFlushTableProcedureManager(108): Start region server flush procedure manager 3609ad07831c,39733,1734371789085
2024-12-16T17:56:31,837 DEBUG [RS:0;3609ad07831c:39733 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member '3609ad07831c,39733,1734371789085'
2024-12-16T17:56:31,837 DEBUG [RS:0;3609ad07831c:39733 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/flush-table-proc/abort'
2024-12-16T17:56:31,838 DEBUG [RS:0;3609ad07831c:39733 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/flush-table-proc/acquired'
2024-12-16T17:56:31,839 DEBUG [RS:0;3609ad07831c:39733 {}] procedure.RegionServerProcedureManagerHost(53): Procedure flush-table-proc started
2024-12-16T17:56:31,839 DEBUG [RS:0;3609ad07831c:39733 {}] procedure.RegionServerProcedureManagerHost(51): Procedure online-snapshot starting
2024-12-16T17:56:31,839 DEBUG [RS:0;3609ad07831c:39733 {}] snapshot.RegionServerSnapshotManager(126): Start Snapshot Manager 3609ad07831c,39733,1734371789085
2024-12-16T17:56:31,839 DEBUG [RS:0;3609ad07831c:39733 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member '3609ad07831c,39733,1734371789085'
2024-12-16T17:56:31,839 DEBUG [RS:0;3609ad07831c:39733 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/online-snapshot/abort'
2024-12-16T17:56:31,840 DEBUG [RS:0;3609ad07831c:39733 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/online-snapshot/acquired'
2024-12-16T17:56:31,840 DEBUG [RS:0;3609ad07831c:39733 {}] procedure.RegionServerProcedureManagerHost(53): Procedure online-snapshot started
2024-12-16T17:56:31,840 INFO [RS:0;3609ad07831c:39733 {}] quotas.RegionServerRpcQuotaManager(64): Quota support disabled
2024-12-16T17:56:31,840 INFO [RS:0;3609ad07831c:39733 {}] quotas.RegionServerSpaceQuotaManager(80): Quota support disabled, not starting space quota manager.
2024-12-16T17:56:31,949 INFO [RS:0;3609ad07831c:39733 {}] monitor.StreamSlowMonitor(122): New stream slow monitor defaultMonitorName
2024-12-16T17:56:31,955 INFO [RS:0;3609ad07831c:39733 {}] wal.AbstractFSWAL(500): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=3609ad07831c%2C39733%2C1734371789085, suffix=, logDir=hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/WALs/3609ad07831c,39733,1734371789085, archiveDir=hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/oldWALs, maxLogs=32
2024-12-16T17:56:31,970 DEBUG [RS:0;3609ad07831c:39733 {}] asyncfs.FanOutOneBlockAsyncDFSOutputHelper(617): When create output stream for /user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/WALs/3609ad07831c,39733,1734371789085/3609ad07831c%2C39733%2C1734371789085.1734371791958, exclude list is [], retry=0
2024-12-16T17:56:31,974 DEBUG [RS-EventLoopGroup-3-2 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = 127.0.0.1/127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:41817,DS-89ba1694-70fd-483d-b6da-33a13e44fdb5,DISK]
2024-12-16T17:56:31,978 INFO [RS:0;3609ad07831c:39733 {}] wal.AbstractFSWAL(841): New WAL /user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/WALs/3609ad07831c,39733,1734371789085/3609ad07831c%2C39733%2C1734371789085.1734371791958
2024-12-16T17:56:31,978 DEBUG [RS:0;3609ad07831c:39733 {}] wal.AbstractFSWAL(925): Create new AsyncFSWAL writer with pipeline: [(127.0.0.1/127.0.0.1:38687:38687)]
2024-12-16T17:56:32,118 INFO [PEWorker-1 {}] util.FSTableDescriptors(140): Updated hbase:meta table descriptor to hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/hbase/meta/.tabledesc/.tableinfo.0000000001.1039
2024-12-16T17:56:32,118 INFO [PEWorker-1 {}] regionserver.HRegion(7106): creating {ENCODED => 1588230740, NAME => 'hbase:meta,,1', STARTKEY => '', ENDKEY => ''}, tableDescriptor='hbase:meta', {TABLE_ATTRIBUTES => {IS_META => 'true', coprocessor$1 => '|org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint|536870911|', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, regionDir=hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4
2024-12-16T17:56:32,132 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41817 is added to blk_1073741833_1009 (size=32)
2024-12-16T17:56:32,536 DEBUG [PEWorker-1 {}] regionserver.HRegion(894): Instantiated hbase:meta,,1.1588230740; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable
2024-12-16T17:56:32,540 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1588230740
2024-12-16T17:56:32,543 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName info
2024-12-16T17:56:32,544 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-12-16T17:56:32,545 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE
2024-12-16T17:56:32,545 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rep_barrier of region 1588230740
2024-12-16T17:56:32,547 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName rep_barrier
2024-12-16T17:56:32,547 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-12-16T17:56:32,548 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/rep_barrier, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE
2024-12-16T17:56:32,548 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family table of region 1588230740
2024-12-16T17:56:32,550 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName table
2024-12-16T17:56:32,550 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-12-16T17:56:32,551 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/table, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE
2024-12-16T17:56:32,552 DEBUG [PEWorker-1 {}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/hbase/meta/1588230740
2024-12-16T17:56:32,553 DEBUG [PEWorker-1 {}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/hbase/meta/1588230740
2024-12-16T17:56:32,555 DEBUG [PEWorker-1 {}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table hbase:meta descriptor;using region.getMemStoreFlushHeapSize/# of families (16.0 M)) instead.
2024-12-16T17:56:32,558 DEBUG [PEWorker-1 {}] regionserver.HRegion(1085): writing seq id for 1588230740
2024-12-16T17:56:32,561 DEBUG [PEWorker-1 {}] wal.WALSplitUtil(409): Wrote file=hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/hbase/meta/1588230740/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1
2024-12-16T17:56:32,562 INFO [PEWorker-1 {}] regionserver.HRegion(1102): Opened 1588230740; next sequenceid=2; ConstantSizeRegionSplitPolicy{desiredMaxFileSize=69232030, jitterRate=0.03163763880729675}, FlushLargeStoresPolicy{flushSizeLowerBound=16777216}
2024-12-16T17:56:32,564 DEBUG [PEWorker-1 {}] regionserver.HRegion(1001): Region open journal for 1588230740:
2024-12-16T17:56:32,564 DEBUG [PEWorker-1 {}] regionserver.HRegion(1681): Closing 1588230740, disabling compactions & flushes
2024-12-16T17:56:32,565 INFO [PEWorker-1 {}] regionserver.HRegion(1703): Closing region hbase:meta,,1.1588230740
2024-12-16T17:56:32,565 DEBUG [PEWorker-1 {}] regionserver.HRegion(1724): Waiting without time limit for close lock on hbase:meta,,1.1588230740
2024-12-16T17:56:32,565 DEBUG [PEWorker-1 {}] regionserver.HRegion(1791): Acquired close lock on hbase:meta,,1.1588230740 after waiting 0 ms
2024-12-16T17:56:32,565 DEBUG [PEWorker-1 {}] regionserver.HRegion(1801): Updates disabled for region hbase:meta,,1.1588230740
2024-12-16T17:56:32,566 INFO [PEWorker-1 {}] regionserver.HRegion(1922): Closed hbase:meta,,1.1588230740
2024-12-16T17:56:32,566 DEBUG [PEWorker-1 {}] regionserver.HRegion(1635): Region close journal for 1588230740:
2024-12-16T17:56:32,569 DEBUG [PEWorker-1 {}] procedure.InitMetaProcedure(96): Execute pid=1, state=RUNNABLE:INIT_META_ASSIGN_META, locked=true; InitMetaProcedure table=hbase:meta
2024-12-16T17:56:32,569 INFO [PEWorker-1 {}] procedure.InitMetaProcedure(107): Going to assign meta
2024-12-16T17:56:32,574 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN}]
2024-12-16T17:56:32,581 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN
2024-12-16T17:56:32,583 INFO [PEWorker-2 {}] assignment.TransitRegionStateProcedure(264): Starting pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, locked=true; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN; state=OFFLINE, location=null; forceNewPlan=false, retain=false
2024-12-16T17:56:32,735 DEBUG [3609ad07831c:38367 {}] assignment.AssignmentManager(2444): Processing assignQueue; systemServersCount=1, allServersCount=1
2024-12-16T17:56:32,738 INFO [PEWorker-3 {}] assignment.RegionStateStore(202): pid=2 updating hbase:meta row=1588230740, regionState=OPENING, regionLocation=3609ad07831c,39733,1734371789085
2024-12-16T17:56:32,743 INFO [PEWorker-3 {}] zookeeper.MetaTableLocator(171): Setting hbase:meta replicaId=0 location in ZooKeeper as 3609ad07831c,39733,1734371789085, state=OPENING
2024-12-16T17:56:32,859 DEBUG [PEWorker-3 {}] zookeeper.MetaTableLocator(183): hbase:meta region location doesn't exist, create it
2024-12-16T17:56:32,868 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:39733-0x1002fe073560001, quorum=127.0.0.1:49190, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase
2024-12-16T17:56:32,868 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:38367-0x1002fe073560000, quorum=127.0.0.1:49190, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase
2024-12-16T17:56:32,869 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED
2024-12-16T17:56:32,869 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED
2024-12-16T17:56:32,873 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=3, ppid=2, state=RUNNABLE; OpenRegionProcedure 1588230740, server=3609ad07831c,39733,1734371789085}]
2024-12-16T17:56:33,053 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 3609ad07831c,39733,1734371789085
2024-12-16T17:56:33,055 DEBUG [RSProcedureDispatcher-pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=AdminService, sasl=false
2024-12-16T17:56:33,058 INFO [RS-EventLoopGroup-3-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:40218, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=AdminService
2024-12-16T17:56:33,067 INFO [RS_OPEN_META-regionserver/3609ad07831c:0-0 {event_type=M_RS_OPEN_META, pid=3}] handler.AssignRegionHandler(135): Open hbase:meta,,1.1588230740
2024-12-16T17:56:33,067 INFO [RS_OPEN_META-regionserver/3609ad07831c:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.WALFactory(183): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.AsyncFSWALProvider
2024-12-16T17:56:33,068 INFO [RS_OPEN_META-regionserver/3609ad07831c:0-0 {event_type=M_RS_OPEN_META, pid=3}] monitor.StreamSlowMonitor(122): New stream slow monitor .meta
2024-12-16T17:56:33,071 INFO [RS_OPEN_META-regionserver/3609ad07831c:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(500): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=3609ad07831c%2C39733%2C1734371789085.meta, suffix=.meta, logDir=hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/WALs/3609ad07831c,39733,1734371789085, archiveDir=hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/oldWALs, maxLogs=32
2024-12-16T17:56:33,085 DEBUG [RS_OPEN_META-regionserver/3609ad07831c:0-0 {event_type=M_RS_OPEN_META, pid=3}] asyncfs.FanOutOneBlockAsyncDFSOutputHelper(617): When create output stream for /user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/WALs/3609ad07831c,39733,1734371789085/3609ad07831c%2C39733%2C1734371789085.meta.1734371793072.meta, exclude list is [], retry=0
2024-12-16T17:56:33,088 DEBUG [RS-EventLoopGroup-3-1 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = 127.0.0.1/127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:41817,DS-89ba1694-70fd-483d-b6da-33a13e44fdb5,DISK]
2024-12-16T17:56:33,091 INFO [RS_OPEN_META-regionserver/3609ad07831c:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(841): New WAL /user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/WALs/3609ad07831c,39733,1734371789085/3609ad07831c%2C39733%2C1734371789085.meta.1734371793072.meta
2024-12-16T17:56:33,091 DEBUG [RS_OPEN_META-regionserver/3609ad07831c:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(925): Create new AsyncFSWAL writer with pipeline: [(127.0.0.1/127.0.0.1:38687:38687)]
2024-12-16T17:56:33,092 DEBUG [RS_OPEN_META-regionserver/3609ad07831c:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7285): Opening region: {ENCODED => 1588230740, NAME => 'hbase:meta,,1', STARTKEY => '', ENDKEY => ''}
2024-12-16T17:56:33,093 DEBUG [RS_OPEN_META-regionserver/3609ad07831c:0-0 {event_type=M_RS_OPEN_META, pid=3}] coprocessor.CoprocessorHost(215): Loading coprocessor class org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint with path null and priority 536870911
2024-12-16T17:56:33,138 DEBUG [RS_OPEN_META-regionserver/3609ad07831c:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7999): Registered coprocessor service: region=hbase:meta,,1 service=MultiRowMutationService
2024-12-16T17:56:33,142 INFO [RS_OPEN_META-regionserver/3609ad07831c:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.RegionCoprocessorHost(436): Loaded coprocessor org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint from HTD of hbase:meta successfully.
2024-12-16T17:56:33,146 DEBUG [RS_OPEN_META-regionserver/3609ad07831c:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table meta 1588230740
2024-12-16T17:56:33,146 DEBUG [RS_OPEN_META-regionserver/3609ad07831c:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(894): Instantiated hbase:meta,,1.1588230740; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable
2024-12-16T17:56:33,146 DEBUG [RS_OPEN_META-regionserver/3609ad07831c:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7327): checking encryption for 1588230740
2024-12-16T17:56:33,146 DEBUG [RS_OPEN_META-regionserver/3609ad07831c:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7330): checking classloading for 1588230740
2024-12-16T17:56:33,149 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1588230740
2024-12-16T17:56:33,150 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName info
2024-12-16T17:56:33,150 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-12-16T17:56:33,151 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE
2024-12-16T17:56:33,151 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rep_barrier of region 1588230740
2024-12-16T17:56:33,153 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName rep_barrier
2024-12-16T17:56:33,153 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-12-16T17:56:33,153 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/rep_barrier, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE
2024-12-16T17:56:33,154 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family table of region 1588230740
2024-12-16T17:56:33,155 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName table
2024-12-16T17:56:33,155 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-12-16T17:56:33,156 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/table, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE
2024-12-16T17:56:33,157 DEBUG [RS_OPEN_META-regionserver/3609ad07831c:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/hbase/meta/1588230740
2024-12-16T17:56:33,160 DEBUG [RS_OPEN_META-regionserver/3609ad07831c:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/hbase/meta/1588230740
2024-12-16T17:56:33,162 DEBUG [RS_OPEN_META-regionserver/3609ad07831c:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table hbase:meta descriptor;using region.getMemStoreFlushHeapSize/# of families (16.0 M)) instead.
2024-12-16T17:56:33,164 DEBUG [RS_OPEN_META-regionserver/3609ad07831c:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1085): writing seq id for 1588230740
2024-12-16T17:56:33,166 INFO [RS_OPEN_META-regionserver/3609ad07831c:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1102): Opened 1588230740; next sequenceid=2; ConstantSizeRegionSplitPolicy{desiredMaxFileSize=58750961, jitterRate=-0.12454245984554291}, FlushLargeStoresPolicy{flushSizeLowerBound=16777216}
2024-12-16T17:56:33,167 DEBUG [RS_OPEN_META-regionserver/3609ad07831c:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1001): Region open journal for 1588230740:
2024-12-16T17:56:33,173 INFO [RS_OPEN_META-regionserver/3609ad07831c:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegionServer(2601): Post open deploy tasks for hbase:meta,,1.1588230740, pid=3, masterSystemTime=1734371793047
2024-12-16T17:56:33,182 DEBUG [RS_OPEN_META-regionserver/3609ad07831c:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegionServer(2628): Finished post open deploy task for hbase:meta,,1.1588230740
2024-12-16T17:56:33,183 INFO [RS_OPEN_META-regionserver/3609ad07831c:0-0 {event_type=M_RS_OPEN_META, pid=3}] handler.AssignRegionHandler(164): Opened hbase:meta,,1.1588230740
2024-12-16T17:56:33,184 INFO [PEWorker-4 {}] assignment.RegionStateStore(202): pid=2 updating hbase:meta row=1588230740, regionState=OPEN, openSeqNum=2, regionLocation=3609ad07831c,39733,1734371789085
2024-12-16T17:56:33,185 INFO [PEWorker-4 {}] zookeeper.MetaTableLocator(171): Setting hbase:meta replicaId=0 location in ZooKeeper as 3609ad07831c,39733,1734371789085, state=OPEN
2024-12-16T17:56:33,214 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:39733-0x1002fe073560001, quorum=127.0.0.1:49190, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/meta-region-server
2024-12-16T17:56:33,214 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:38367-0x1002fe073560000, quorum=127.0.0.1:49190, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/meta-region-server
2024-12-16T17:56:33,214 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED
2024-12-16T17:56:33,214 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED
2024-12-16T17:56:33,223 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=3, resume processing ppid=2
2024-12-16T17:56:33,224 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=3, ppid=2, state=SUCCESS; OpenRegionProcedure 1588230740, server=3609ad07831c,39733,1734371789085 in 341 msec
2024-12-16T17:56:33,231 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=2, resume processing ppid=1
2024-12-16T17:56:33,231 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=2, ppid=1, state=SUCCESS; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN in 650 msec
2024-12-16T17:56:33,237 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=1, state=SUCCESS; InitMetaProcedure table=hbase:meta in 1.6040 sec
2024-12-16T17:56:33,237 INFO [master/3609ad07831c:0:becomeActiveMaster {}] master.HMaster(1088): Wait for region servers to report in: status=status unset, state=RUNNING, startTime=1734371793237, completionTime=-1
2024-12-16T17:56:33,238 INFO [master/3609ad07831c:0:becomeActiveMaster {}] master.ServerManager(907): Finished waiting on RegionServer count=1; waited=0ms, expected min=1 server(s), max=1 server(s), master is running
2024-12-16T17:56:33,238 DEBUG [master/3609ad07831c:0:becomeActiveMaster {}] assignment.AssignmentManager(1747): Joining cluster...
2024-12-16T17:56:33,268 DEBUG [hconnection-0xf95a041-shared-pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false
2024-12-16T17:56:33,270 INFO [RS-EventLoopGroup-3-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:40220, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService
2024-12-16T17:56:33,278 INFO [master/3609ad07831c:0:becomeActiveMaster {}] assignment.AssignmentManager(1759): Number of RegionServers=1
2024-12-16T17:56:33,279 INFO [master/3609ad07831c:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(81): ADDED pid=-1, state=WAITING_TIMEOUT; org.apache.hadoop.hbase.master.assignment.AssignmentManager$RegionInTransitionChore; timeout=60000, timestamp=1734371853278
2024-12-16T17:56:33,279 INFO [master/3609ad07831c:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(81): ADDED pid=-1, state=WAITING_TIMEOUT; org.apache.hadoop.hbase.master.assignment.AssignmentManager$DeadServerMetricRegionChore; timeout=120000, timestamp=1734371913279
2024-12-16T17:56:33,279 INFO [master/3609ad07831c:0:becomeActiveMaster {}] assignment.AssignmentManager(1766): Joined the cluster in 40 msec
2024-12-16T17:56:33,320 INFO [master/3609ad07831c:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=3609ad07831c,38367,1734371788356-ClusterStatusChore, period=60000, unit=MILLISECONDS is enabled.
2024-12-16T17:56:33,321 INFO [master/3609ad07831c:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=3609ad07831c,38367,1734371788356-BalancerChore, period=300000, unit=MILLISECONDS is enabled.
2024-12-16T17:56:33,321 INFO [master/3609ad07831c:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=3609ad07831c,38367,1734371788356-RegionNormalizerChore, period=300000, unit=MILLISECONDS is enabled.
2024-12-16T17:56:33,322 INFO [master/3609ad07831c:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=CatalogJanitor-3609ad07831c:38367, period=300000, unit=MILLISECONDS is enabled.
2024-12-16T17:56:33,322 INFO [master/3609ad07831c:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=HbckChore-, period=3600000, unit=MILLISECONDS is enabled.
2024-12-16T17:56:33,327 DEBUG [master/3609ad07831c:0.Chore.1 {}] janitor.CatalogJanitor(179):
2024-12-16T17:56:33,330 INFO [master/3609ad07831c:0:becomeActiveMaster {}] master.TableNamespaceManager(92): Namespace table not found. Creating...
2024-12-16T17:56:33,331 INFO [master/3609ad07831c:0:becomeActiveMaster {}] master.HMaster(2425): Client=null/null create 'hbase:namespace', {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '10', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} 2024-12-16T17:56:33,336 DEBUG [master/3609ad07831c:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(1098): Stored pid=4, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION; CreateTableProcedure table=hbase:namespace 2024-12-16T17:56:33,339 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, locked=true; CreateTableProcedure table=hbase:namespace execute state=CREATE_TABLE_PRE_OPERATION 2024-12-16T17:56:33,340 DEBUG [PEWorker-3 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-16T17:56:33,341 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_WRITE_FS_LAYOUT, locked=true; CreateTableProcedure table=hbase:namespace execute state=CREATE_TABLE_WRITE_FS_LAYOUT 2024-12-16T17:56:33,352 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41817 is added to blk_1073741835_1011 (size=358) 2024-12-16T17:56:33,357 INFO [RegionOpenAndInit-hbase:namespace-pool-0 {}] regionserver.HRegion(7106): creating {ENCODED => a4053c31d189c903d02c8274354da0e8, NAME => 'hbase:namespace,,1734371793330.a4053c31d189c903d02c8274354da0e8.', STARTKEY => '', ENDKEY => ''}, tableDescriptor='hbase:namespace', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '10', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, regionDir=hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4 2024-12-16T17:56:33,365 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41817 is added to blk_1073741836_1012 (size=42) 2024-12-16T17:56:33,767 DEBUG [RegionOpenAndInit-hbase:namespace-pool-0 {}] regionserver.HRegion(894): Instantiated hbase:namespace,,1734371793330.a4053c31d189c903d02c8274354da0e8.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-16T17:56:33,767 DEBUG [RegionOpenAndInit-hbase:namespace-pool-0 {}] regionserver.HRegion(1681): Closing a4053c31d189c903d02c8274354da0e8, disabling compactions & flushes 2024-12-16T17:56:33,767 INFO [RegionOpenAndInit-hbase:namespace-pool-0 {}] regionserver.HRegion(1703): Closing region hbase:namespace,,1734371793330.a4053c31d189c903d02c8274354da0e8. 2024-12-16T17:56:33,767 DEBUG [RegionOpenAndInit-hbase:namespace-pool-0 {}] regionserver.HRegion(1724): Waiting without time limit for close lock on hbase:namespace,,1734371793330.a4053c31d189c903d02c8274354da0e8. 2024-12-16T17:56:33,767 DEBUG [RegionOpenAndInit-hbase:namespace-pool-0 {}] regionserver.HRegion(1791): Acquired close lock on hbase:namespace,,1734371793330.a4053c31d189c903d02c8274354da0e8. 
after waiting 0 ms 2024-12-16T17:56:33,767 DEBUG [RegionOpenAndInit-hbase:namespace-pool-0 {}] regionserver.HRegion(1801): Updates disabled for region hbase:namespace,,1734371793330.a4053c31d189c903d02c8274354da0e8. 2024-12-16T17:56:33,767 INFO [RegionOpenAndInit-hbase:namespace-pool-0 {}] regionserver.HRegion(1922): Closed hbase:namespace,,1734371793330.a4053c31d189c903d02c8274354da0e8. 2024-12-16T17:56:33,767 DEBUG [RegionOpenAndInit-hbase:namespace-pool-0 {}] regionserver.HRegion(1635): Region close journal for a4053c31d189c903d02c8274354da0e8: 2024-12-16T17:56:33,769 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_ADD_TO_META, locked=true; CreateTableProcedure table=hbase:namespace execute state=CREATE_TABLE_ADD_TO_META 2024-12-16T17:56:33,775 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":2,"row":"hbase:namespace,,1734371793330.a4053c31d189c903d02c8274354da0e8.","families":{"info":[{"qualifier":"regioninfo","vlen":41,"tag":[],"timestamp":"1734371793770"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1734371793770"}]},"ts":"1734371793770"} 2024-12-16T17:56:33,795 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(1516): Added 1 regions to meta. 2024-12-16T17:56:33,797 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_ASSIGN_REGIONS, locked=true; CreateTableProcedure table=hbase:namespace execute state=CREATE_TABLE_ASSIGN_REGIONS 2024-12-16T17:56:33,799 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":1,"row":"hbase:namespace","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1734371793797"}]},"ts":"1734371793797"} 2024-12-16T17:56:33,804 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(1655): Updated tableName=hbase:namespace, state=ENABLING in hbase:meta 2024-12-16T17:56:33,854 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE; TransitRegionStateProcedure table=hbase:namespace, region=a4053c31d189c903d02c8274354da0e8, ASSIGN}] 2024-12-16T17:56:33,859 INFO [PEWorker-5 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE; TransitRegionStateProcedure table=hbase:namespace, region=a4053c31d189c903d02c8274354da0e8, ASSIGN 2024-12-16T17:56:33,862 INFO [PEWorker-5 {}] assignment.TransitRegionStateProcedure(264): Starting pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, locked=true; TransitRegionStateProcedure table=hbase:namespace, region=a4053c31d189c903d02c8274354da0e8, ASSIGN; state=OFFLINE, location=3609ad07831c,39733,1734371789085; forceNewPlan=false, retain=false 2024-12-16T17:56:34,013 INFO [PEWorker-4 {}] assignment.RegionStateStore(202): pid=5 updating hbase:meta row=a4053c31d189c903d02c8274354da0e8, regionState=OPENING, regionLocation=3609ad07831c,39733,1734371789085 2024-12-16T17:56:34,020 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=6, ppid=5, state=RUNNABLE; OpenRegionProcedure a4053c31d189c903d02c8274354da0e8, server=3609ad07831c,39733,1734371789085}] 2024-12-16T17:56:34,177 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 3609ad07831c,39733,1734371789085 2024-12-16T17:56:34,189 INFO [RS_OPEN_PRIORITY_REGION-regionserver/3609ad07831c:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] handler.AssignRegionHandler(135): Open 
hbase:namespace,,1734371793330.a4053c31d189c903d02c8274354da0e8. 2024-12-16T17:56:34,189 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/3609ad07831c:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] regionserver.HRegion(7285): Opening region: {ENCODED => a4053c31d189c903d02c8274354da0e8, NAME => 'hbase:namespace,,1734371793330.a4053c31d189c903d02c8274354da0e8.', STARTKEY => '', ENDKEY => ''} 2024-12-16T17:56:34,190 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/3609ad07831c:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table namespace a4053c31d189c903d02c8274354da0e8 2024-12-16T17:56:34,190 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/3609ad07831c:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] regionserver.HRegion(894): Instantiated hbase:namespace,,1734371793330.a4053c31d189c903d02c8274354da0e8.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-16T17:56:34,190 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/3609ad07831c:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] regionserver.HRegion(7327): checking encryption for a4053c31d189c903d02c8274354da0e8 2024-12-16T17:56:34,190 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/3609ad07831c:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] regionserver.HRegion(7330): checking classloading for a4053c31d189c903d02c8274354da0e8 2024-12-16T17:56:34,193 INFO [StoreOpener-a4053c31d189c903d02c8274354da0e8-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region a4053c31d189c903d02c8274354da0e8 2024-12-16T17:56:34,196 INFO [StoreOpener-a4053c31d189c903d02c8274354da0e8-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region a4053c31d189c903d02c8274354da0e8 columnFamilyName info 2024-12-16T17:56:34,196 DEBUG [StoreOpener-a4053c31d189c903d02c8274354da0e8-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-16T17:56:34,197 INFO [StoreOpener-a4053c31d189c903d02c8274354da0e8-1 {}] regionserver.HStore(327): Store=a4053c31d189c903d02c8274354da0e8/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-16T17:56:34,198 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/3609ad07831c:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/hbase/namespace/a4053c31d189c903d02c8274354da0e8 2024-12-16T17:56:34,199 DEBUG 
[RS_OPEN_PRIORITY_REGION-regionserver/3609ad07831c:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/hbase/namespace/a4053c31d189c903d02c8274354da0e8 2024-12-16T17:56:34,202 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/3609ad07831c:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] regionserver.HRegion(1085): writing seq id for a4053c31d189c903d02c8274354da0e8 2024-12-16T17:56:34,206 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/3609ad07831c:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] wal.WALSplitUtil(409): Wrote file=hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/hbase/namespace/a4053c31d189c903d02c8274354da0e8/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-12-16T17:56:34,206 INFO [RS_OPEN_PRIORITY_REGION-regionserver/3609ad07831c:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] regionserver.HRegion(1102): Opened a4053c31d189c903d02c8274354da0e8; next sequenceid=2; ConstantSizeRegionSplitPolicy{desiredMaxFileSize=62945195, jitterRate=-0.06204350292682648}, FlushLargeStoresPolicy{flushSizeLowerBound=-1} 2024-12-16T17:56:34,207 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/3609ad07831c:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] regionserver.HRegion(1001): Region open journal for a4053c31d189c903d02c8274354da0e8: 2024-12-16T17:56:34,209 INFO [RS_OPEN_PRIORITY_REGION-regionserver/3609ad07831c:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] regionserver.HRegionServer(2601): Post open deploy tasks for hbase:namespace,,1734371793330.a4053c31d189c903d02c8274354da0e8., pid=6, masterSystemTime=1734371794176 2024-12-16T17:56:34,213 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/3609ad07831c:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] regionserver.HRegionServer(2628): Finished post open deploy task for hbase:namespace,,1734371793330.a4053c31d189c903d02c8274354da0e8. 2024-12-16T17:56:34,213 INFO [RS_OPEN_PRIORITY_REGION-regionserver/3609ad07831c:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] handler.AssignRegionHandler(164): Opened hbase:namespace,,1734371793330.a4053c31d189c903d02c8274354da0e8. 
2024-12-16T17:56:34,214 INFO [PEWorker-2 {}] assignment.RegionStateStore(202): pid=5 updating hbase:meta row=a4053c31d189c903d02c8274354da0e8, regionState=OPEN, openSeqNum=2, regionLocation=3609ad07831c,39733,1734371789085 2024-12-16T17:56:34,220 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=6, resume processing ppid=5 2024-12-16T17:56:34,222 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=6, ppid=5, state=SUCCESS; OpenRegionProcedure a4053c31d189c903d02c8274354da0e8, server=3609ad07831c,39733,1734371789085 in 197 msec 2024-12-16T17:56:34,224 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=5, resume processing ppid=4 2024-12-16T17:56:34,224 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=5, ppid=4, state=SUCCESS; TransitRegionStateProcedure table=hbase:namespace, region=a4053c31d189c903d02c8274354da0e8, ASSIGN in 366 msec 2024-12-16T17:56:34,226 INFO [PEWorker-5 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_UPDATE_DESC_CACHE, locked=true; CreateTableProcedure table=hbase:namespace execute state=CREATE_TABLE_UPDATE_DESC_CACHE 2024-12-16T17:56:34,226 DEBUG [PEWorker-5 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":1,"row":"hbase:namespace","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1734371794226"}]},"ts":"1734371794226"} 2024-12-16T17:56:34,229 INFO [PEWorker-5 {}] hbase.MetaTableAccessor(1655): Updated tableName=hbase:namespace, state=ENABLED in hbase:meta 2024-12-16T17:56:34,236 INFO [PEWorker-5 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_POST_OPERATION, locked=true; CreateTableProcedure table=hbase:namespace execute state=CREATE_TABLE_POST_OPERATION 2024-12-16T17:56:34,239 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=4, state=SUCCESS; CreateTableProcedure table=hbase:namespace in 905 msec 2024-12-16T17:56:34,240 DEBUG [master/3609ad07831c:0:becomeActiveMaster {}] zookeeper.ZKUtil(113): master:38367-0x1002fe073560000, quorum=127.0.0.1:49190, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/namespace 2024-12-16T17:56:34,249 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:38367-0x1002fe073560000, quorum=127.0.0.1:49190, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/namespace 2024-12-16T17:56:34,249 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:39733-0x1002fe073560001, quorum=127.0.0.1:49190, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-16T17:56:34,249 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:38367-0x1002fe073560000, quorum=127.0.0.1:49190, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-16T17:56:34,277 DEBUG [master/3609ad07831c:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(1098): Stored pid=7, state=RUNNABLE:CREATE_NAMESPACE_PREPARE; CreateNamespaceProcedure, namespace=default 2024-12-16T17:56:34,299 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:38367-0x1002fe073560000, quorum=127.0.0.1:49190, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/namespace 2024-12-16T17:56:34,314 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=7, state=SUCCESS; 
CreateNamespaceProcedure, namespace=default in 39 msec 2024-12-16T17:56:34,322 DEBUG [master/3609ad07831c:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(1098): Stored pid=8, state=RUNNABLE:CREATE_NAMESPACE_PREPARE; CreateNamespaceProcedure, namespace=hbase 2024-12-16T17:56:34,342 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:38367-0x1002fe073560000, quorum=127.0.0.1:49190, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/namespace 2024-12-16T17:56:34,356 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=8, state=SUCCESS; CreateNamespaceProcedure, namespace=hbase in 32 msec 2024-12-16T17:56:34,384 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:38367-0x1002fe073560000, quorum=127.0.0.1:49190, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/namespace/default 2024-12-16T17:56:34,401 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:38367-0x1002fe073560000, quorum=127.0.0.1:49190, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/namespace/hbase 2024-12-16T17:56:34,401 INFO [master/3609ad07831c:0:becomeActiveMaster {}] master.HMaster(1218): Master has completed initialization 5.232sec 2024-12-16T17:56:34,403 INFO [master/3609ad07831c:0:becomeActiveMaster {}] quotas.MasterQuotaManager(97): Quota support disabled 2024-12-16T17:56:34,405 INFO [master/3609ad07831c:0:becomeActiveMaster {}] slowlog.SlowLogMasterService(57): Slow/Large requests logging to system table hbase:slowlog is disabled. Quitting. 2024-12-16T17:56:34,406 INFO [master/3609ad07831c:0:becomeActiveMaster {}] waleventtracker.WALEventTrackerTableCreator(75): wal event tracker requests logging to table REPLICATION.WALEVENTTRACKER is disabled. Quitting. 2024-12-16T17:56:34,407 INFO [master/3609ad07831c:0:becomeActiveMaster {}] master.ReplicationSinkTrackerTableCreator(90): replication sink tracker requests logging to table REPLICATION.SINK_TRACKER is disabled. Quitting. 2024-12-16T17:56:34,407 INFO [master/3609ad07831c:0:becomeActiveMaster {}] zookeeper.ZKWatcher(271): not a secure deployment, proceeding 2024-12-16T17:56:34,408 INFO [master/3609ad07831c:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=3609ad07831c,38367,1734371788356-MobFileCleanerChore, period=86400, unit=SECONDS is enabled. 2024-12-16T17:56:34,409 INFO [master/3609ad07831c:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=3609ad07831c,38367,1734371788356-MobFileCompactionChore, period=604800, unit=SECONDS is enabled. 2024-12-16T17:56:34,415 DEBUG [master/3609ad07831c:0:becomeActiveMaster {}] master.HMaster(1321): Balancer post startup initialization complete, took 0 seconds 2024-12-16T17:56:34,416 INFO [master/3609ad07831c:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=RollingUpgradeChore, period=10, unit=SECONDS is enabled. 2024-12-16T17:56:34,416 INFO [master/3609ad07831c:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=3609ad07831c,38367,1734371788356-OldWALsDirSizeChore, period=300000, unit=MILLISECONDS is enabled. 
2024-12-16T17:56:34,446 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x093315ff to 127.0.0.1:49190 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@d9ec50d 2024-12-16T17:56:34,447 WARN [Time-limited test {}] client.ZKConnectionRegistry(90): ZKConnectionRegistry is deprecated. See https://hbase.apache.org/book.html#client.rpcconnectionregistry 2024-12-16T17:56:34,461 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@66d532b2, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-16T17:56:34,465 DEBUG [Time-limited test {}] nio.NioEventLoop(110): -Dio.netty.noKeySetOptimization: false 2024-12-16T17:56:34,465 DEBUG [Time-limited test {}] nio.NioEventLoop(111): -Dio.netty.selectorAutoRebuildThreshold: 512 2024-12-16T17:56:34,475 DEBUG [hconnection-0x4e64c0d1-shared-pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-16T17:56:34,482 INFO [RS-EventLoopGroup-3-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:40236, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-16T17:56:34,489 INFO [Time-limited test {}] hbase.HBaseTestingUtility(1199): Minicluster is up; activeMaster=3609ad07831c,38367,1734371788356 2024-12-16T17:56:34,501 INFO [Time-limited test {}] hbase.ResourceChecker(147): before: TestAcidGuaranteesWithAdaptivePolicy#testMixedAtomicity Thread=219, OpenFileDescriptor=444, MaxFileDescriptor=1048576, SystemLoadAverage=120, ProcessCount=11, AvailableMemoryMB=4233 2024-12-16T17:56:34,510 DEBUG [Time-limited test {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=MasterService, sasl=false 2024-12-16T17:56:34,512 INFO [RS-EventLoopGroup-1-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:46774, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=MasterService 2024-12-16T17:56:34,519 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38367 {}] util.TableDescriptorChecker(321): MEMSTORE_FLUSHSIZE for table descriptor or "hbase.hregion.memstore.flush.size" (131072) is too small, which might cause very frequent flushing. 
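The entries above show HBaseTestingUtility reporting that the single-node minicluster is up before TestAcidGuaranteesWithAdaptivePolicy#testMixedAtomicity begins. As a minimal sketch only, and not the test's own source, a one-master, one-regionserver minicluster like the one logged here is typically started and stopped with the standard test utility:

    // Hypothetical sketch: standing up a single-node HBase minicluster for a test.
    import org.apache.hadoop.hbase.HBaseTestingUtility;

    public class MiniClusterSketch {
      public static void main(String[] args) throws Exception {
        HBaseTestingUtility util = new HBaseTestingUtility();
        util.startMiniCluster(1);   // one master plus one region server, as in the log
        try {
          // ... run test logic against util.getConnection() ...
        } finally {
          util.shutdownMiniCluster();
        }
      }
    }
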
2024-12-16T17:56:34,522 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38367 {}] master.HMaster$4(2389): Client=jenkins//172.17.0.2 create 'TestAcidGuarantees', {TABLE_ATTRIBUTES => {METADATA => {'hbase.hregion.compacting.memstore.type' => 'ADAPTIVE'}}}, {NAME => 'A', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'B', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'C', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-12-16T17:56:34,524 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38367 {}] procedure2.ProcedureExecutor(1098): Stored pid=9, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION; CreateTableProcedure table=TestAcidGuarantees 2024-12-16T17:56:34,526 INFO [PEWorker-2 {}] procedure.CreateTableProcedure(89): pid=9, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, locked=true; CreateTableProcedure table=TestAcidGuarantees execute state=CREATE_TABLE_PRE_OPERATION 2024-12-16T17:56:34,526 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38367 {}] master.MasterRpcServices(713): Client=jenkins//172.17.0.2 procedure request for creating table: namespace: "default" qualifier: "TestAcidGuarantees" procId is: 9 2024-12-16T17:56:34,527 DEBUG [PEWorker-2 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-16T17:56:34,546 INFO [PEWorker-2 {}] procedure.CreateTableProcedure(89): pid=9, state=RUNNABLE:CREATE_TABLE_WRITE_FS_LAYOUT, locked=true; CreateTableProcedure table=TestAcidGuarantees execute state=CREATE_TABLE_WRITE_FS_LAYOUT 2024-12-16T17:56:34,555 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38367 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=9 2024-12-16T17:56:34,559 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41817 is added to blk_1073741837_1013 (size=963) 2024-12-16T17:56:34,662 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38367 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=9 2024-12-16T17:56:34,867 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38367 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=9 2024-12-16T17:56:34,967 INFO [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(7106): creating {ENCODED => 99318ad6c4e7b8782230d738424ff705, NAME => 'TestAcidGuarantees,,1734371794518.99318ad6c4e7b8782230d738424ff705.', STARTKEY => '', ENDKEY => ''}, tableDescriptor='TestAcidGuarantees', {TABLE_ATTRIBUTES => {METADATA => {'hbase.hregion.compacting.memstore.type' => 'ADAPTIVE', 'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'A', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', 
KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'B', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'C', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, regionDir=hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4 2024-12-16T17:56:34,978 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41817 is added to blk_1073741838_1014 (size=53) 2024-12-16T17:56:35,171 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38367 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=9 2024-12-16T17:56:35,382 DEBUG [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(894): Instantiated TestAcidGuarantees,,1734371794518.99318ad6c4e7b8782230d738424ff705.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-16T17:56:35,382 DEBUG [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(1681): Closing 99318ad6c4e7b8782230d738424ff705, disabling compactions & flushes 2024-12-16T17:56:35,383 INFO [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(1703): Closing region TestAcidGuarantees,,1734371794518.99318ad6c4e7b8782230d738424ff705. 2024-12-16T17:56:35,383 DEBUG [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(1724): Waiting without time limit for close lock on TestAcidGuarantees,,1734371794518.99318ad6c4e7b8782230d738424ff705. 2024-12-16T17:56:35,383 DEBUG [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(1791): Acquired close lock on TestAcidGuarantees,,1734371794518.99318ad6c4e7b8782230d738424ff705. after waiting 0 ms 2024-12-16T17:56:35,383 DEBUG [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(1801): Updates disabled for region TestAcidGuarantees,,1734371794518.99318ad6c4e7b8782230d738424ff705. 2024-12-16T17:56:35,383 INFO [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(1922): Closed TestAcidGuarantees,,1734371794518.99318ad6c4e7b8782230d738424ff705. 
2024-12-16T17:56:35,383 DEBUG [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(1635): Region close journal for 99318ad6c4e7b8782230d738424ff705: 2024-12-16T17:56:35,388 INFO [PEWorker-2 {}] procedure.CreateTableProcedure(89): pid=9, state=RUNNABLE:CREATE_TABLE_ADD_TO_META, locked=true; CreateTableProcedure table=TestAcidGuarantees execute state=CREATE_TABLE_ADD_TO_META 2024-12-16T17:56:35,389 DEBUG [PEWorker-2 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":2,"row":"TestAcidGuarantees,,1734371794518.99318ad6c4e7b8782230d738424ff705.","families":{"info":[{"qualifier":"regioninfo","vlen":52,"tag":[],"timestamp":"1734371795388"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1734371795388"}]},"ts":"1734371795388"} 2024-12-16T17:56:35,392 INFO [PEWorker-2 {}] hbase.MetaTableAccessor(1516): Added 1 regions to meta. 2024-12-16T17:56:35,394 INFO [PEWorker-2 {}] procedure.CreateTableProcedure(89): pid=9, state=RUNNABLE:CREATE_TABLE_ASSIGN_REGIONS, locked=true; CreateTableProcedure table=TestAcidGuarantees execute state=CREATE_TABLE_ASSIGN_REGIONS 2024-12-16T17:56:35,394 DEBUG [PEWorker-2 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":1,"row":"TestAcidGuarantees","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1734371795394"}]},"ts":"1734371795394"} 2024-12-16T17:56:35,397 INFO [PEWorker-2 {}] hbase.MetaTableAccessor(1655): Updated tableName=TestAcidGuarantees, state=ENABLING in hbase:meta 2024-12-16T17:56:35,459 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=10, ppid=9, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE; TransitRegionStateProcedure table=TestAcidGuarantees, region=99318ad6c4e7b8782230d738424ff705, ASSIGN}] 2024-12-16T17:56:35,461 INFO [PEWorker-3 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=10, ppid=9, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE; TransitRegionStateProcedure table=TestAcidGuarantees, region=99318ad6c4e7b8782230d738424ff705, ASSIGN 2024-12-16T17:56:35,462 INFO [PEWorker-3 {}] assignment.TransitRegionStateProcedure(264): Starting pid=10, ppid=9, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, locked=true; TransitRegionStateProcedure table=TestAcidGuarantees, region=99318ad6c4e7b8782230d738424ff705, ASSIGN; state=OFFLINE, location=3609ad07831c,39733,1734371789085; forceNewPlan=false, retain=false 2024-12-16T17:56:35,613 INFO [PEWorker-5 {}] assignment.RegionStateStore(202): pid=10 updating hbase:meta row=99318ad6c4e7b8782230d738424ff705, regionState=OPENING, regionLocation=3609ad07831c,39733,1734371789085 2024-12-16T17:56:35,622 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=11, ppid=10, state=RUNNABLE; OpenRegionProcedure 99318ad6c4e7b8782230d738424ff705, server=3609ad07831c,39733,1734371789085}] 2024-12-16T17:56:35,674 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38367 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=9 2024-12-16T17:56:35,776 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 3609ad07831c,39733,1734371789085 2024-12-16T17:56:35,784 INFO [RS_OPEN_REGION-regionserver/3609ad07831c:0-0 {event_type=M_RS_OPEN_REGION, pid=11}] handler.AssignRegionHandler(135): Open TestAcidGuarantees,,1734371794518.99318ad6c4e7b8782230d738424ff705. 
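The create request and table descriptor recorded above ('TestAcidGuarantees' with column families A, B and C, VERSIONS => '1', BLOCKSIZE => '65536 B (64KB)', and the table attribute 'hbase.hregion.compacting.memstore.type' => 'ADAPTIVE') correspond to what a client can express through the Admin API. The following is a hypothetical sketch assembled from the values in the log, not the test's actual code:

    // Hypothetical sketch: requesting the table creation recorded above via the
    // HBase 2.x client API, using values taken from the log.
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;
    import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
    import org.apache.hadoop.hbase.util.Bytes;

    public class CreateTestAcidGuaranteesTable {
      public static void main(String[] args) throws Exception {
        try (Connection conn = ConnectionFactory.createConnection();
             Admin admin = conn.getAdmin()) {
          TableDescriptorBuilder builder =
              TableDescriptorBuilder.newBuilder(TableName.valueOf("TestAcidGuarantees"))
                  // Table-level attribute seen in the log: ADAPTIVE in-memory compaction.
                  .setValue("hbase.hregion.compacting.memstore.type", "ADAPTIVE");
          for (String family : new String[] {"A", "B", "C"}) {
            builder.setColumnFamily(
                ColumnFamilyDescriptorBuilder.newBuilder(Bytes.toBytes(family))
                    .setMaxVersions(1)     // VERSIONS => '1' in the log
                    .setBlocksize(65536)   // BLOCKSIZE => '65536 B (64KB)'
                    .build());
          }
          admin.createTable(builder.build());
        }
      }
    }

The ADAPTIVE attribute is what later makes each store report memstore type=CompactingMemStore with compactor=ADAPTIVE in the StoreOpener entries below.
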
2024-12-16T17:56:35,785 DEBUG [RS_OPEN_REGION-regionserver/3609ad07831c:0-0 {event_type=M_RS_OPEN_REGION, pid=11}] regionserver.HRegion(7285): Opening region: {ENCODED => 99318ad6c4e7b8782230d738424ff705, NAME => 'TestAcidGuarantees,,1734371794518.99318ad6c4e7b8782230d738424ff705.', STARTKEY => '', ENDKEY => ''} 2024-12-16T17:56:35,785 DEBUG [RS_OPEN_REGION-regionserver/3609ad07831c:0-0 {event_type=M_RS_OPEN_REGION, pid=11}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table TestAcidGuarantees 99318ad6c4e7b8782230d738424ff705 2024-12-16T17:56:35,786 DEBUG [RS_OPEN_REGION-regionserver/3609ad07831c:0-0 {event_type=M_RS_OPEN_REGION, pid=11}] regionserver.HRegion(894): Instantiated TestAcidGuarantees,,1734371794518.99318ad6c4e7b8782230d738424ff705.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-16T17:56:35,786 DEBUG [RS_OPEN_REGION-regionserver/3609ad07831c:0-0 {event_type=M_RS_OPEN_REGION, pid=11}] regionserver.HRegion(7327): checking encryption for 99318ad6c4e7b8782230d738424ff705 2024-12-16T17:56:35,786 DEBUG [RS_OPEN_REGION-regionserver/3609ad07831c:0-0 {event_type=M_RS_OPEN_REGION, pid=11}] regionserver.HRegion(7330): checking classloading for 99318ad6c4e7b8782230d738424ff705 2024-12-16T17:56:35,789 INFO [StoreOpener-99318ad6c4e7b8782230d738424ff705-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family A of region 99318ad6c4e7b8782230d738424ff705 2024-12-16T17:56:35,792 INFO [StoreOpener-99318ad6c4e7b8782230d738424ff705-1 {}] regionserver.CompactingMemStore(122): Store=A, in-memory flush size threshold=2.00 MB, immutable segments index type=CHUNK_MAP, compactor=ADAPTIVE, pipelineThreshold=2, compactionCellMax=10 2024-12-16T17:56:35,792 INFO [StoreOpener-99318ad6c4e7b8782230d738424ff705-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 99318ad6c4e7b8782230d738424ff705 columnFamilyName A 2024-12-16T17:56:35,792 DEBUG [StoreOpener-99318ad6c4e7b8782230d738424ff705-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-16T17:56:35,793 INFO [StoreOpener-99318ad6c4e7b8782230d738424ff705-1 {}] regionserver.HStore(327): Store=99318ad6c4e7b8782230d738424ff705/A, memstore type=CompactingMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-16T17:56:35,794 INFO [StoreOpener-99318ad6c4e7b8782230d738424ff705-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, 
cacheDataCompressed=false, prefetchOnOpen=false, for column family B of region 99318ad6c4e7b8782230d738424ff705 2024-12-16T17:56:35,796 INFO [StoreOpener-99318ad6c4e7b8782230d738424ff705-1 {}] regionserver.CompactingMemStore(122): Store=B, in-memory flush size threshold=2.00 MB, immutable segments index type=CHUNK_MAP, compactor=ADAPTIVE, pipelineThreshold=2, compactionCellMax=10 2024-12-16T17:56:35,796 INFO [StoreOpener-99318ad6c4e7b8782230d738424ff705-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 99318ad6c4e7b8782230d738424ff705 columnFamilyName B 2024-12-16T17:56:35,796 DEBUG [StoreOpener-99318ad6c4e7b8782230d738424ff705-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-16T17:56:35,797 INFO [StoreOpener-99318ad6c4e7b8782230d738424ff705-1 {}] regionserver.HStore(327): Store=99318ad6c4e7b8782230d738424ff705/B, memstore type=CompactingMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-16T17:56:35,797 INFO [StoreOpener-99318ad6c4e7b8782230d738424ff705-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family C of region 99318ad6c4e7b8782230d738424ff705 2024-12-16T17:56:35,799 INFO [StoreOpener-99318ad6c4e7b8782230d738424ff705-1 {}] regionserver.CompactingMemStore(122): Store=C, in-memory flush size threshold=2.00 MB, immutable segments index type=CHUNK_MAP, compactor=ADAPTIVE, pipelineThreshold=2, compactionCellMax=10 2024-12-16T17:56:35,799 INFO [StoreOpener-99318ad6c4e7b8782230d738424ff705-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 99318ad6c4e7b8782230d738424ff705 columnFamilyName C 2024-12-16T17:56:35,799 DEBUG [StoreOpener-99318ad6c4e7b8782230d738424ff705-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-16T17:56:35,800 INFO [StoreOpener-99318ad6c4e7b8782230d738424ff705-1 {}] regionserver.HStore(327): Store=99318ad6c4e7b8782230d738424ff705/C, memstore 
type=CompactingMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-16T17:56:35,801 INFO [RS_OPEN_REGION-regionserver/3609ad07831c:0-0 {event_type=M_RS_OPEN_REGION, pid=11}] regionserver.HRegion(1178): Setting FlushNonSloppyStoresFirstPolicy for the region=TestAcidGuarantees,,1734371794518.99318ad6c4e7b8782230d738424ff705. 2024-12-16T17:56:35,802 DEBUG [RS_OPEN_REGION-regionserver/3609ad07831c:0-0 {event_type=M_RS_OPEN_REGION, pid=11}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/99318ad6c4e7b8782230d738424ff705 2024-12-16T17:56:35,803 DEBUG [RS_OPEN_REGION-regionserver/3609ad07831c:0-0 {event_type=M_RS_OPEN_REGION, pid=11}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/99318ad6c4e7b8782230d738424ff705 2024-12-16T17:56:35,805 DEBUG [RS_OPEN_REGION-regionserver/3609ad07831c:0-0 {event_type=M_RS_OPEN_REGION, pid=11}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table TestAcidGuarantees descriptor;using region.getMemStoreFlushHeapSize/# of families (16.0 M)) instead. 2024-12-16T17:56:35,807 DEBUG [RS_OPEN_REGION-regionserver/3609ad07831c:0-0 {event_type=M_RS_OPEN_REGION, pid=11}] regionserver.HRegion(1085): writing seq id for 99318ad6c4e7b8782230d738424ff705 2024-12-16T17:56:35,810 DEBUG [RS_OPEN_REGION-regionserver/3609ad07831c:0-0 {event_type=M_RS_OPEN_REGION, pid=11}] wal.WALSplitUtil(409): Wrote file=hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/99318ad6c4e7b8782230d738424ff705/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-12-16T17:56:35,811 INFO [RS_OPEN_REGION-regionserver/3609ad07831c:0-0 {event_type=M_RS_OPEN_REGION, pid=11}] regionserver.HRegion(1102): Opened 99318ad6c4e7b8782230d738424ff705; next sequenceid=2; ConstantSizeRegionSplitPolicy{desiredMaxFileSize=58779870, jitterRate=-0.12411168217658997}, FlushLargeStoresPolicy{flushSizeLowerBound=16777216} 2024-12-16T17:56:35,811 DEBUG [RS_OPEN_REGION-regionserver/3609ad07831c:0-0 {event_type=M_RS_OPEN_REGION, pid=11}] regionserver.HRegion(1001): Region open journal for 99318ad6c4e7b8782230d738424ff705: 2024-12-16T17:56:35,813 INFO [RS_OPEN_REGION-regionserver/3609ad07831c:0-0 {event_type=M_RS_OPEN_REGION, pid=11}] regionserver.HRegionServer(2601): Post open deploy tasks for TestAcidGuarantees,,1734371794518.99318ad6c4e7b8782230d738424ff705., pid=11, masterSystemTime=1734371795775 2024-12-16T17:56:35,816 DEBUG [RS_OPEN_REGION-regionserver/3609ad07831c:0-0 {event_type=M_RS_OPEN_REGION, pid=11}] regionserver.HRegionServer(2628): Finished post open deploy task for TestAcidGuarantees,,1734371794518.99318ad6c4e7b8782230d738424ff705. 2024-12-16T17:56:35,816 INFO [RS_OPEN_REGION-regionserver/3609ad07831c:0-0 {event_type=M_RS_OPEN_REGION, pid=11}] handler.AssignRegionHandler(164): Opened TestAcidGuarantees,,1734371794518.99318ad6c4e7b8782230d738424ff705. 
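Near the end of this excerpt the memstores of this region are flushed and a put is rejected with RegionTooBusyException ("Over memstore limit=512.0 K"). The write path being exercised is ordinary client puts against the three families; a minimal hypothetical sketch follows, with row key, qualifier and value invented for illustration:

    // Hypothetical sketch: a single-row write against 'TestAcidGuarantees'
    // (family names from the log; row, qualifier and value are illustrative).
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;
    import org.apache.hadoop.hbase.client.Put;
    import org.apache.hadoop.hbase.client.Table;
    import org.apache.hadoop.hbase.util.Bytes;

    public class PutSketch {
      public static void main(String[] args) throws Exception {
        try (Connection conn = ConnectionFactory.createConnection();
             Table table = conn.getTable(TableName.valueOf("TestAcidGuarantees"))) {
          Put put = new Put(Bytes.toBytes("test_row_0"));
          put.addColumn(Bytes.toBytes("A"), Bytes.toBytes("col0"), Bytes.toBytes("value"));
          put.addColumn(Bytes.toBytes("B"), Bytes.toBytes("col0"), Bytes.toBytes("value"));
          put.addColumn(Bytes.toBytes("C"), Bytes.toBytes("col0"), Bytes.toBytes("value"));
          table.put(put);
        }
      }
    }

RegionTooBusyException is a retryable IOException, so the HBase client normally retries such a rejected put rather than failing the operation immediately.
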
2024-12-16T17:56:35,817 INFO [PEWorker-4 {}] assignment.RegionStateStore(202): pid=10 updating hbase:meta row=99318ad6c4e7b8782230d738424ff705, regionState=OPEN, openSeqNum=2, regionLocation=3609ad07831c,39733,1734371789085 2024-12-16T17:56:35,822 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=11, resume processing ppid=10 2024-12-16T17:56:35,823 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=11, ppid=10, state=SUCCESS; OpenRegionProcedure 99318ad6c4e7b8782230d738424ff705, server=3609ad07831c,39733,1734371789085 in 197 msec 2024-12-16T17:56:35,825 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=10, resume processing ppid=9 2024-12-16T17:56:35,825 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=10, ppid=9, state=SUCCESS; TransitRegionStateProcedure table=TestAcidGuarantees, region=99318ad6c4e7b8782230d738424ff705, ASSIGN in 363 msec 2024-12-16T17:56:35,826 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=9, state=RUNNABLE:CREATE_TABLE_UPDATE_DESC_CACHE, locked=true; CreateTableProcedure table=TestAcidGuarantees execute state=CREATE_TABLE_UPDATE_DESC_CACHE 2024-12-16T17:56:35,827 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":1,"row":"TestAcidGuarantees","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1734371795826"}]},"ts":"1734371795826"} 2024-12-16T17:56:35,829 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(1655): Updated tableName=TestAcidGuarantees, state=ENABLED in hbase:meta 2024-12-16T17:56:35,861 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=9, state=RUNNABLE:CREATE_TABLE_POST_OPERATION, locked=true; CreateTableProcedure table=TestAcidGuarantees execute state=CREATE_TABLE_POST_OPERATION 2024-12-16T17:56:35,864 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=9, state=SUCCESS; CreateTableProcedure table=TestAcidGuarantees in 1.3390 sec 2024-12-16T17:56:36,680 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38367 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=9 2024-12-16T17:56:36,681 INFO [Time-limited test {}] client.HBaseAdmin$TableFuture(3751): Operation: CREATE, Table Name: default:TestAcidGuarantees, procId: 9 completed 2024-12-16T17:56:36,688 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x2f7b70c9 to 127.0.0.1:49190 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@59692011 2024-12-16T17:56:36,745 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@756b8d92, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-16T17:56:36,751 DEBUG [Time-limited test {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-16T17:56:36,754 INFO [RS-EventLoopGroup-3-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:40238, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-16T17:56:36,757 DEBUG [Time-limited test {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=MasterService, sasl=false 2024-12-16T17:56:36,760 INFO [RS-EventLoopGroup-1-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:46780, 
version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=MasterService 2024-12-16T17:56:36,768 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x6d5f32f0 to 127.0.0.1:49190 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@1f936550 2024-12-16T17:56:36,777 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@38f1cf45, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-16T17:56:36,778 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x111a5244 to 127.0.0.1:49190 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@15270cd2 2024-12-16T17:56:36,785 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@54665582, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-16T17:56:36,786 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x09f6c377 to 127.0.0.1:49190 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@73cb302d 2024-12-16T17:56:36,793 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@1567a193, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-16T17:56:36,795 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x070038fb to 127.0.0.1:49190 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@2de03a6e 2024-12-16T17:56:36,801 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@2ed20369, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-16T17:56:36,803 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x1c1b9a1b to 127.0.0.1:49190 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@2d3220d9 2024-12-16T17:56:36,809 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@13c5b75d, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-16T17:56:36,811 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x2c4dd458 to 127.0.0.1:49190 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@24f84183 2024-12-16T17:56:36,818 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@566455fe, compressor=null, tcpKeepAlive=true, 
tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-16T17:56:36,819 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x51254994 to 127.0.0.1:49190 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@37b7fe6a 2024-12-16T17:56:36,827 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@438886c7, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-16T17:56:36,829 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x668d0ebc to 127.0.0.1:49190 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@61341214 2024-12-16T17:56:36,835 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@6f324586, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-16T17:56:36,837 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x00a9bab0 to 127.0.0.1:49190 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@414cac03 2024-12-16T17:56:36,843 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@3cafcfd9, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-16T17:56:36,852 DEBUG [hconnection-0x47be79fd-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-16T17:56:36,852 DEBUG [hconnection-0x293c9f96-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-16T17:56:36,852 DEBUG [hconnection-0x4b969ab9-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-16T17:56:36,852 DEBUG [hconnection-0x34907d80-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-16T17:56:36,853 DEBUG [hconnection-0x169ff269-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-16T17:56:36,853 DEBUG [hconnection-0x131c45f-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-16T17:56:36,853 DEBUG [hconnection-0x3d2cc084-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-16T17:56:36,854 INFO [RS-EventLoopGroup-3-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:40246, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-16T17:56:36,855 INFO [RS-EventLoopGroup-3-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:40262, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins 
(auth:SIMPLE), service=ClientService 2024-12-16T17:56:36,856 INFO [RS-EventLoopGroup-3-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:40274, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-16T17:56:36,856 DEBUG [hconnection-0x2f204f5e-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-16T17:56:36,857 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38367 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-12-16T17:56:36,857 INFO [RS-EventLoopGroup-3-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:40280, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-16T17:56:36,858 DEBUG [hconnection-0x3679459b-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-16T17:56:36,858 INFO [RS-EventLoopGroup-3-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:40284, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-16T17:56:36,861 INFO [RS-EventLoopGroup-3-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:40296, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-16T17:56:36,861 INFO [RS-EventLoopGroup-3-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:40298, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-16T17:56:36,862 INFO [RS-EventLoopGroup-3-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:40322, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-16T17:56:36,863 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38367 {}] procedure2.ProcedureExecutor(1098): Stored pid=12, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=12, table=TestAcidGuarantees 2024-12-16T17:56:36,863 INFO [RS-EventLoopGroup-3-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:40334, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-16T17:56:36,864 INFO [PEWorker-1 {}] procedure.FlushTableProcedure(91): pid=12, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=12, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-12-16T17:56:36,866 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38367 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=12 2024-12-16T17:56:36,867 INFO [PEWorker-1 {}] procedure.FlushTableProcedure(91): pid=12, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=12, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-12-16T17:56:36,868 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=13, ppid=12, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-12-16T17:56:36,914 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39733 {}] regionserver.HRegion(8581): Flush requested on 99318ad6c4e7b8782230d738424ff705 2024-12-16T17:56:36,925 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 99318ad6c4e7b8782230d738424ff705 3/3 column families, dataSize=60.38 KB heapSize=158.95 KB 2024-12-16T17:56:36,938 
DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 99318ad6c4e7b8782230d738424ff705, store=A 2024-12-16T17:56:36,939 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-16T17:56:36,939 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 99318ad6c4e7b8782230d738424ff705, store=B 2024-12-16T17:56:36,940 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-16T17:56:36,940 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 99318ad6c4e7b8782230d738424ff705, store=C 2024-12-16T17:56:36,940 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-16T17:56:36,968 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38367 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=12 2024-12-16T17:56:37,023 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39733 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=99318ad6c4e7b8782230d738424ff705, server=3609ad07831c,39733,1734371789085 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-16T17:56:37,023 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39733 {}] ipc.CallRunner(138): callId: 7 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40298 deadline: 1734371857015, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=99318ad6c4e7b8782230d738424ff705, server=3609ad07831c,39733,1734371789085 2024-12-16T17:56:37,028 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/99318ad6c4e7b8782230d738424ff705/.tmp/A/27e28eaf7dfc4e9a848283467fc94f93 is 50, key is test_row_0/A:col10/1734371796897/Put/seqid=0 2024-12-16T17:56:37,030 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 3609ad07831c,39733,1734371789085 2024-12-16T17:56:37,031 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=39733 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=13 2024-12-16T17:56:37,037 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39733 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=99318ad6c4e7b8782230d738424ff705, server=3609ad07831c,39733,1734371789085 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-16T17:56:37,037 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39733 {}] ipc.CallRunner(138): callId: 6 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40296 deadline: 1734371857018, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=99318ad6c4e7b8782230d738424ff705, server=3609ad07831c,39733,1734371789085 2024-12-16T17:56:37,039 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39733 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=99318ad6c4e7b8782230d738424ff705, server=3609ad07831c,39733,1734371789085 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-16T17:56:37,039 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39733 {}] ipc.CallRunner(138): callId: 7 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40322 deadline: 1734371857019, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=99318ad6c4e7b8782230d738424ff705, server=3609ad07831c,39733,1734371789085 2024-12-16T17:56:37,041 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39733 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=99318ad6c4e7b8782230d738424ff705, server=3609ad07831c,39733,1734371789085 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-16T17:56:37,041 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39733 {}] ipc.CallRunner(138): callId: 7 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40334 deadline: 1734371857019, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=99318ad6c4e7b8782230d738424ff705, server=3609ad07831c,39733,1734371789085 2024-12-16T17:56:37,042 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39733 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=99318ad6c4e7b8782230d738424ff705, server=3609ad07831c,39733,1734371789085 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-16T17:56:37,042 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39733 {}] ipc.CallRunner(138): callId: 8 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40284 deadline: 1734371857022, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=99318ad6c4e7b8782230d738424ff705, server=3609ad07831c,39733,1734371789085 2024-12-16T17:56:37,045 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-0 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1734371794518.99318ad6c4e7b8782230d738424ff705. 
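The entries above record the client side of the test run: several ReadOnlyZKClient/AbstractRpcClient connections are opened against the test ZooKeeper at 127.0.0.1:49190, SIMPLE-authenticated ClientService connections are accepted by the region server, and Mutate calls against TestAcidGuarantees start being rejected with RegionTooBusyException once the region's memstore passes its 512.0 K blocking limit. A minimal, hypothetical sketch of the kind of writer that produces this traffic follows; only the ZooKeeper endpoint, table name, row key, families A/B/C and qualifier col10 are taken from the log, and everything else (class name, value payloads) is illustrative rather than the test's actual code.

// Hypothetical writer sketch; endpoint and table/row/family names are from the log above.
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.util.Bytes;

public class AcidGuaranteesWriterSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    // ZooKeeper endpoint as reported by ReadOnlyZKClient in the entries above.
    conf.set("hbase.zookeeper.quorum", "127.0.0.1");
    conf.set("hbase.zookeeper.property.clientPort", "49190");

    try (Connection connection = ConnectionFactory.createConnection(conf);
         Table table = connection.getTable(TableName.valueOf("TestAcidGuarantees"))) {
      // One small Put per column family, mirroring the A/B/C stores being flushed.
      Put put = new Put(Bytes.toBytes("test_row_0"));
      for (String family : new String[] {"A", "B", "C"}) {
        put.addColumn(Bytes.toBytes(family), Bytes.toBytes("col10"),
            Bytes.toBytes("value-0")); // illustrative payload
      }
      table.put(put);
    }
  }
}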
2024-12-16T17:56:37,045 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-0 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1734371794518.99318ad6c4e7b8782230d738424ff705. as already flushing 2024-12-16T17:56:37,046 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-0 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1734371794518.99318ad6c4e7b8782230d738424ff705. 2024-12-16T17:56:37,046 ERROR [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-0 {event_type=RS_FLUSH_REGIONS, pid=13}] handler.RSProcedureHandler(58): pid=13 java.io.IOException: Unable to complete flush {ENCODED => 99318ad6c4e7b8782230d738424ff705, NAME => 'TestAcidGuarantees,,1734371794518.99318ad6c4e7b8782230d738424ff705.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-16T17:56:37,048 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-0 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=13 java.io.IOException: Unable to complete flush {ENCODED => 99318ad6c4e7b8782230d738424ff705, NAME => 'TestAcidGuarantees,,1734371794518.99318ad6c4e7b8782230d738424ff705.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
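The flush path in the surrounding entries starts with the master receiving "flush TestAcidGuarantees" from the client, storing FlushTableProcedure pid=12, and dispatching a FlushRegionProcedure (pid=13) to the region server; the callable then reports "Unable to complete flush" because MemStoreFlusher is already flushing the region, so the master re-dispatches pid=13 while the client keeps polling "Checking to see if procedure is done pid=12". A hedged sketch of the administrative call that starts this sequence is below; it assumes the same Connection as the previous sketch and is illustrative, not the test's actual code.

// Hedged sketch of the admin call behind "Client=jenkins//172.17.0.2 flush TestAcidGuarantees".
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;

public class FlushRequestSketch {
  static void flushTable(Connection connection) throws Exception {
    try (Admin admin = connection.getAdmin()) {
      // Asks the master to flush the table; the master runs it as the
      // FlushTableProcedure / FlushRegionProcedure pair seen here as pid=12 / pid=13,
      // and the caller waits while the procedure-done checks above repeat.
      admin.flush(TableName.valueOf("TestAcidGuarantees"));
    }
  }
}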
2024-12-16T17:56:37,060 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38367 {}] master.HMaster(4114): Remote procedure failed, pid=13 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 99318ad6c4e7b8782230d738424ff705, NAME => 'TestAcidGuarantees,,1734371794518.99318ad6c4e7b8782230d738424ff705.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 99318ad6c4e7b8782230d738424ff705, NAME => 'TestAcidGuarantees,,1734371794518.99318ad6c4e7b8782230d738424ff705.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-16T17:56:37,071 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41817 is added to blk_1073741839_1015 (size=12001) 2024-12-16T17:56:37,161 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39733 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=99318ad6c4e7b8782230d738424ff705, server=3609ad07831c,39733,1734371789085 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-16T17:56:37,162 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39733 {}] ipc.CallRunner(138): callId: 10 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40284 deadline: 1734371857160, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=99318ad6c4e7b8782230d738424ff705, server=3609ad07831c,39733,1734371789085 2024-12-16T17:56:37,163 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39733 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=99318ad6c4e7b8782230d738424ff705, server=3609ad07831c,39733,1734371789085 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-16T17:56:37,163 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39733 {}] ipc.CallRunner(138): callId: 9 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40298 deadline: 1734371857161, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=99318ad6c4e7b8782230d738424ff705, server=3609ad07831c,39733,1734371789085 2024-12-16T17:56:37,164 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39733 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=99318ad6c4e7b8782230d738424ff705, server=3609ad07831c,39733,1734371789085 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-16T17:56:37,165 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39733 {}] ipc.CallRunner(138): callId: 9 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40322 deadline: 1734371857161, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=99318ad6c4e7b8782230d738424ff705, server=3609ad07831c,39733,1734371789085 2024-12-16T17:56:37,165 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39733 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=99318ad6c4e7b8782230d738424ff705, server=3609ad07831c,39733,1734371789085 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-16T17:56:37,166 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39733 {}] ipc.CallRunner(138): callId: 9 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40334 deadline: 1734371857161, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=99318ad6c4e7b8782230d738424ff705, server=3609ad07831c,39733,1734371789085 2024-12-16T17:56:37,167 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39733 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=99318ad6c4e7b8782230d738424ff705, server=3609ad07831c,39733,1734371789085 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-16T17:56:37,168 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39733 {}] ipc.CallRunner(138): callId: 8 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40296 deadline: 1734371857163, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=99318ad6c4e7b8782230d738424ff705, server=3609ad07831c,39733,1734371789085 2024-12-16T17:56:37,170 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38367 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=12 2024-12-16T17:56:37,220 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 3609ad07831c,39733,1734371789085 2024-12-16T17:56:37,220 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=39733 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=13 2024-12-16T17:56:37,221 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-1 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1734371794518.99318ad6c4e7b8782230d738424ff705. 2024-12-16T17:56:37,221 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-1 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1734371794518.99318ad6c4e7b8782230d738424ff705. as already flushing 2024-12-16T17:56:37,222 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-1 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1734371794518.99318ad6c4e7b8782230d738424ff705. 2024-12-16T17:56:37,222 ERROR [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-1 {event_type=RS_FLUSH_REGIONS, pid=13}] handler.RSProcedureHandler(58): pid=13 java.io.IOException: Unable to complete flush {ENCODED => 99318ad6c4e7b8782230d738424ff705, NAME => 'TestAcidGuarantees,,1734371794518.99318ad6c4e7b8782230d738424ff705.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-16T17:56:37,222 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-1 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=13 java.io.IOException: Unable to complete flush {ENCODED => 99318ad6c4e7b8782230d738424ff705, NAME => 'TestAcidGuarantees,,1734371794518.99318ad6c4e7b8782230d738424ff705.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-16T17:56:37,223 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38367 {}] master.HMaster(4114): Remote procedure failed, pid=13 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 99318ad6c4e7b8782230d738424ff705, NAME => 'TestAcidGuarantees,,1734371794518.99318ad6c4e7b8782230d738424ff705.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 99318ad6c4e7b8782230d738424ff705, NAME => 'TestAcidGuarantees,,1734371794518.99318ad6c4e7b8782230d738424ff705.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-16T17:56:37,369 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39733 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=99318ad6c4e7b8782230d738424ff705, server=3609ad07831c,39733,1734371789085 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-16T17:56:37,370 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39733 {}] ipc.CallRunner(138): callId: 11 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40298 deadline: 1734371857367, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=99318ad6c4e7b8782230d738424ff705, server=3609ad07831c,39733,1734371789085 2024-12-16T17:56:37,373 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39733 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=99318ad6c4e7b8782230d738424ff705, server=3609ad07831c,39733,1734371789085 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-16T17:56:37,373 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39733 {}] ipc.CallRunner(138): callId: 12 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40284 deadline: 1734371857367, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=99318ad6c4e7b8782230d738424ff705, server=3609ad07831c,39733,1734371789085 2024-12-16T17:56:37,373 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39733 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=99318ad6c4e7b8782230d738424ff705, server=3609ad07831c,39733,1734371789085 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-16T17:56:37,373 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39733 {}] ipc.CallRunner(138): callId: 11 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40334 deadline: 1734371857371, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=99318ad6c4e7b8782230d738424ff705, server=3609ad07831c,39733,1734371789085 2024-12-16T17:56:37,374 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39733 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=99318ad6c4e7b8782230d738424ff705, server=3609ad07831c,39733,1734371789085 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-16T17:56:37,375 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39733 {}] ipc.CallRunner(138): callId: 10 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40296 deadline: 1734371857373, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=99318ad6c4e7b8782230d738424ff705, server=3609ad07831c,39733,1734371789085 2024-12-16T17:56:37,375 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39733 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=99318ad6c4e7b8782230d738424ff705, server=3609ad07831c,39733,1734371789085 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-16T17:56:37,376 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39733 {}] ipc.CallRunner(138): callId: 11 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40322 deadline: 1734371857373, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=99318ad6c4e7b8782230d738424ff705, server=3609ad07831c,39733,1734371789085 2024-12-16T17:56:37,379 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 3609ad07831c,39733,1734371789085 2024-12-16T17:56:37,380 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=39733 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=13 2024-12-16T17:56:37,389 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-2 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1734371794518.99318ad6c4e7b8782230d738424ff705. 2024-12-16T17:56:37,389 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-2 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1734371794518.99318ad6c4e7b8782230d738424ff705. as already flushing 2024-12-16T17:56:37,389 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-2 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1734371794518.99318ad6c4e7b8782230d738424ff705. 2024-12-16T17:56:37,389 ERROR [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-2 {event_type=RS_FLUSH_REGIONS, pid=13}] handler.RSProcedureHandler(58): pid=13 java.io.IOException: Unable to complete flush {ENCODED => 99318ad6c4e7b8782230d738424ff705, NAME => 'TestAcidGuarantees,,1734371794518.99318ad6c4e7b8782230d738424ff705.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-16T17:56:37,389 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-2 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=13 java.io.IOException: Unable to complete flush {ENCODED => 99318ad6c4e7b8782230d738424ff705, NAME => 'TestAcidGuarantees,,1734371794518.99318ad6c4e7b8782230d738424ff705.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-16T17:56:37,390 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38367 {}] master.HMaster(4114): Remote procedure failed, pid=13 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 99318ad6c4e7b8782230d738424ff705, NAME => 'TestAcidGuarantees,,1734371794518.99318ad6c4e7b8782230d738424ff705.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 99318ad6c4e7b8782230d738424ff705, NAME => 'TestAcidGuarantees,,1734371794518.99318ad6c4e7b8782230d738424ff705.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-16T17:56:37,472 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38367 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=12 2024-12-16T17:56:37,474 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=22.36 KB at sequenceid=14 (bloomFilter=true), to=hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/99318ad6c4e7b8782230d738424ff705/.tmp/A/27e28eaf7dfc4e9a848283467fc94f93 2024-12-16T17:56:37,542 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 3609ad07831c,39733,1734371789085 2024-12-16T17:56:37,543 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=39733 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=13 2024-12-16T17:56:37,543 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-0 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1734371794518.99318ad6c4e7b8782230d738424ff705. 2024-12-16T17:56:37,544 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-0 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1734371794518.99318ad6c4e7b8782230d738424ff705. as already flushing 2024-12-16T17:56:37,544 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-0 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1734371794518.99318ad6c4e7b8782230d738424ff705. 
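Before and after this point the region server keeps rejecting Mutate calls with RegionTooBusyException ("Over memstore limit=512.0 K") while MemStoreFlusher writes the column-family stores out (22.36 KB flushed to the .tmp/A/ HFile at sequenceid=14). The HBase client retries such calls on its own, which is why the same connections reappear with new callIds and later deadlines; the sketch below shows an explicit, purely illustrative backoff loop for the same situation, with made-up attempt and sleep values.

// Illustrative backoff around a single Put; the client's own retry layer normally handles this.
import org.apache.hadoop.hbase.RegionTooBusyException;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.client.Table;

public class BackoffWriteSketch {
  static void putWithBackoff(Table table, Put put) throws Exception {
    int maxAttempts = 5;   // illustrative value
    long sleepMs = 200;    // illustrative value
    for (int attempt = 1; ; attempt++) {
      try {
        table.put(put);
        return;
      } catch (RegionTooBusyException e) {
        // Matches the "Over memstore limit=..." warnings: the region blocks writes
        // until the in-progress flush brings the memstore back under its limit.
        if (attempt >= maxAttempts) {
          throw e;
        }
        Thread.sleep(sleepMs);
        sleepMs *= 2; // simple exponential backoff
      }
    }
  }
}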
2024-12-16T17:56:37,544 ERROR [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-0 {event_type=RS_FLUSH_REGIONS, pid=13}] handler.RSProcedureHandler(58): pid=13 java.io.IOException: Unable to complete flush {ENCODED => 99318ad6c4e7b8782230d738424ff705, NAME => 'TestAcidGuarantees,,1734371794518.99318ad6c4e7b8782230d738424ff705.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-16T17:56:37,544 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-0 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=13 java.io.IOException: Unable to complete flush {ENCODED => 99318ad6c4e7b8782230d738424ff705, NAME => 'TestAcidGuarantees,,1734371794518.99318ad6c4e7b8782230d738424ff705.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-16T17:56:37,545 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38367 {}] master.HMaster(4114): Remote procedure failed, pid=13 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 99318ad6c4e7b8782230d738424ff705, NAME => 'TestAcidGuarantees,,1734371794518.99318ad6c4e7b8782230d738424ff705.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 99318ad6c4e7b8782230d738424ff705, NAME => 'TestAcidGuarantees,,1734371794518.99318ad6c4e7b8782230d738424ff705.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-16T17:56:37,568 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/99318ad6c4e7b8782230d738424ff705/.tmp/B/101adadc71214d0d82a7842965784165 is 50, key is test_row_0/B:col10/1734371796897/Put/seqid=0 2024-12-16T17:56:37,591 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41817 is added to blk_1073741840_1016 (size=12001) 2024-12-16T17:56:37,675 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39733 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=99318ad6c4e7b8782230d738424ff705, server=3609ad07831c,39733,1734371789085 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-16T17:56:37,676 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39733 {}] ipc.CallRunner(138): callId: 13 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40298 deadline: 1734371857674, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=99318ad6c4e7b8782230d738424ff705, server=3609ad07831c,39733,1734371789085 2024-12-16T17:56:37,679 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39733 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=99318ad6c4e7b8782230d738424ff705, server=3609ad07831c,39733,1734371789085 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-16T17:56:37,680 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39733 {}] ipc.CallRunner(138): callId: 13 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40334 deadline: 1734371857678, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=99318ad6c4e7b8782230d738424ff705, server=3609ad07831c,39733,1734371789085 2024-12-16T17:56:37,684 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39733 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=99318ad6c4e7b8782230d738424ff705, server=3609ad07831c,39733,1734371789085 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-16T17:56:37,684 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39733 {}] ipc.CallRunner(138): callId: 12 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40296 deadline: 1734371857683, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=99318ad6c4e7b8782230d738424ff705, server=3609ad07831c,39733,1734371789085 2024-12-16T17:56:37,686 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39733 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=99318ad6c4e7b8782230d738424ff705, server=3609ad07831c,39733,1734371789085 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-16T17:56:37,686 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39733 {}] ipc.CallRunner(138): callId: 14 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40284 deadline: 1734371857683, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=99318ad6c4e7b8782230d738424ff705, server=3609ad07831c,39733,1734371789085 2024-12-16T17:56:37,699 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39733 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=99318ad6c4e7b8782230d738424ff705, server=3609ad07831c,39733,1734371789085 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-16T17:56:37,699 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39733 {}] ipc.CallRunner(138): callId: 13 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40322 deadline: 1734371857698, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=99318ad6c4e7b8782230d738424ff705, server=3609ad07831c,39733,1734371789085 2024-12-16T17:56:37,701 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 3609ad07831c,39733,1734371789085 2024-12-16T17:56:37,702 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=39733 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=13 2024-12-16T17:56:37,702 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-1 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1734371794518.99318ad6c4e7b8782230d738424ff705. 2024-12-16T17:56:37,702 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-1 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1734371794518.99318ad6c4e7b8782230d738424ff705. as already flushing 2024-12-16T17:56:37,702 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-1 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1734371794518.99318ad6c4e7b8782230d738424ff705. 2024-12-16T17:56:37,702 ERROR [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-1 {event_type=RS_FLUSH_REGIONS, pid=13}] handler.RSProcedureHandler(58): pid=13 java.io.IOException: Unable to complete flush {ENCODED => 99318ad6c4e7b8782230d738424ff705, NAME => 'TestAcidGuarantees,,1734371794518.99318ad6c4e7b8782230d738424ff705.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-16T17:56:37,703 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-1 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=13 java.io.IOException: Unable to complete flush {ENCODED => 99318ad6c4e7b8782230d738424ff705, NAME => 'TestAcidGuarantees,,1734371794518.99318ad6c4e7b8782230d738424ff705.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-16T17:56:37,704 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38367 {}] master.HMaster(4114): Remote procedure failed, pid=13 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 99318ad6c4e7b8782230d738424ff705, NAME => 'TestAcidGuarantees,,1734371794518.99318ad6c4e7b8782230d738424ff705.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 99318ad6c4e7b8782230d738424ff705, NAME => 'TestAcidGuarantees,,1734371794518.99318ad6c4e7b8782230d738424ff705.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-16T17:56:37,769 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsTableSourceImpl(133): Creating new MetricsTableSourceImpl for table 'TestAcidGuarantees' 2024-12-16T17:56:37,770 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsTableSourceImpl(133): Creating new MetricsTableSourceImpl for table 'hbase:meta' 2024-12-16T17:56:37,771 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsTableSourceImpl(133): Creating new MetricsTableSourceImpl for table 'hbase:namespace' 2024-12-16T17:56:37,856 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 3609ad07831c,39733,1734371789085 2024-12-16T17:56:37,857 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=39733 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=13 2024-12-16T17:56:37,857 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-2 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1734371794518.99318ad6c4e7b8782230d738424ff705. 2024-12-16T17:56:37,857 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-2 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1734371794518.99318ad6c4e7b8782230d738424ff705. as already flushing 2024-12-16T17:56:37,857 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-2 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1734371794518.99318ad6c4e7b8782230d738424ff705. 
2024-12-16T17:56:37,857 ERROR [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-2 {event_type=RS_FLUSH_REGIONS, pid=13}] handler.RSProcedureHandler(58): pid=13 java.io.IOException: Unable to complete flush {ENCODED => 99318ad6c4e7b8782230d738424ff705, NAME => 'TestAcidGuarantees,,1734371794518.99318ad6c4e7b8782230d738424ff705.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-16T17:56:37,858 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-2 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=13 java.io.IOException: Unable to complete flush {ENCODED => 99318ad6c4e7b8782230d738424ff705, NAME => 'TestAcidGuarantees,,1734371794518.99318ad6c4e7b8782230d738424ff705.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-16T17:56:37,859 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38367 {}] master.HMaster(4114): Remote procedure failed, pid=13 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 99318ad6c4e7b8782230d738424ff705, NAME => 'TestAcidGuarantees,,1734371794518.99318ad6c4e7b8782230d738424ff705.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 99318ad6c4e7b8782230d738424ff705, NAME => 'TestAcidGuarantees,,1734371794518.99318ad6c4e7b8782230d738424ff705.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-16T17:56:37,973 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38367 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=12 2024-12-16T17:56:37,993 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=22.36 KB at sequenceid=14 (bloomFilter=true), to=hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/99318ad6c4e7b8782230d738424ff705/.tmp/B/101adadc71214d0d82a7842965784165 2024-12-16T17:56:38,011 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 3609ad07831c,39733,1734371789085 2024-12-16T17:56:38,012 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=39733 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=13 2024-12-16T17:56:38,012 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-0 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1734371794518.99318ad6c4e7b8782230d738424ff705. 2024-12-16T17:56:38,012 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-0 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1734371794518.99318ad6c4e7b8782230d738424ff705. as already flushing 2024-12-16T17:56:38,013 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-0 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1734371794518.99318ad6c4e7b8782230d738424ff705. 
2024-12-16T17:56:38,013 ERROR [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-0 {event_type=RS_FLUSH_REGIONS, pid=13}] handler.RSProcedureHandler(58): pid=13 java.io.IOException: Unable to complete flush {ENCODED => 99318ad6c4e7b8782230d738424ff705, NAME => 'TestAcidGuarantees,,1734371794518.99318ad6c4e7b8782230d738424ff705.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-16T17:56:38,013 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-0 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=13 java.io.IOException: Unable to complete flush {ENCODED => 99318ad6c4e7b8782230d738424ff705, NAME => 'TestAcidGuarantees,,1734371794518.99318ad6c4e7b8782230d738424ff705.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-16T17:56:38,013 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38367 {}] master.HMaster(4114): Remote procedure failed, pid=13 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 99318ad6c4e7b8782230d738424ff705, NAME => 'TestAcidGuarantees,,1734371794518.99318ad6c4e7b8782230d738424ff705.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 99318ad6c4e7b8782230d738424ff705, NAME => 'TestAcidGuarantees,,1734371794518.99318ad6c4e7b8782230d738424ff705.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-16T17:56:38,048 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/99318ad6c4e7b8782230d738424ff705/.tmp/C/12d72cbd16064807829fbc7be4c42973 is 50, key is test_row_0/C:col10/1734371796897/Put/seqid=0 2024-12-16T17:56:38,082 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41817 is added to blk_1073741841_1017 (size=12001) 2024-12-16T17:56:38,084 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=22.36 KB at sequenceid=14 (bloomFilter=true), to=hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/99318ad6c4e7b8782230d738424ff705/.tmp/C/12d72cbd16064807829fbc7be4c42973 2024-12-16T17:56:38,101 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/99318ad6c4e7b8782230d738424ff705/.tmp/A/27e28eaf7dfc4e9a848283467fc94f93 as hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/99318ad6c4e7b8782230d738424ff705/A/27e28eaf7dfc4e9a848283467fc94f93 2024-12-16T17:56:38,120 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/99318ad6c4e7b8782230d738424ff705/A/27e28eaf7dfc4e9a848283467fc94f93, entries=150, sequenceid=14, filesize=11.7 K 2024-12-16T17:56:38,127 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing 
hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/99318ad6c4e7b8782230d738424ff705/.tmp/B/101adadc71214d0d82a7842965784165 as hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/99318ad6c4e7b8782230d738424ff705/B/101adadc71214d0d82a7842965784165 2024-12-16T17:56:38,142 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/99318ad6c4e7b8782230d738424ff705/B/101adadc71214d0d82a7842965784165, entries=150, sequenceid=14, filesize=11.7 K 2024-12-16T17:56:38,148 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/99318ad6c4e7b8782230d738424ff705/.tmp/C/12d72cbd16064807829fbc7be4c42973 as hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/99318ad6c4e7b8782230d738424ff705/C/12d72cbd16064807829fbc7be4c42973 2024-12-16T17:56:38,166 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 3609ad07831c,39733,1734371789085 2024-12-16T17:56:38,166 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=39733 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=13 2024-12-16T17:56:38,167 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-1 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1734371794518.99318ad6c4e7b8782230d738424ff705. 2024-12-16T17:56:38,167 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-1 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1734371794518.99318ad6c4e7b8782230d738424ff705. as already flushing 2024-12-16T17:56:38,167 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-1 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1734371794518.99318ad6c4e7b8782230d738424ff705. 2024-12-16T17:56:38,167 ERROR [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-1 {event_type=RS_FLUSH_REGIONS, pid=13}] handler.RSProcedureHandler(58): pid=13 java.io.IOException: Unable to complete flush {ENCODED => 99318ad6c4e7b8782230d738424ff705, NAME => 'TestAcidGuarantees,,1734371794518.99318ad6c4e7b8782230d738424ff705.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-16T17:56:38,167 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-1 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=13 java.io.IOException: Unable to complete flush {ENCODED => 99318ad6c4e7b8782230d738424ff705, NAME => 'TestAcidGuarantees,,1734371794518.99318ad6c4e7b8782230d738424ff705.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-16T17:56:38,170 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38367 {}] master.HMaster(4114): Remote procedure failed, pid=13 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 99318ad6c4e7b8782230d738424ff705, NAME => 'TestAcidGuarantees,,1734371794518.99318ad6c4e7b8782230d738424ff705.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 99318ad6c4e7b8782230d738424ff705, NAME => 'TestAcidGuarantees,,1734371794518.99318ad6c4e7b8782230d738424ff705.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-16T17:56:38,172 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/99318ad6c4e7b8782230d738424ff705/C/12d72cbd16064807829fbc7be4c42973, entries=150, sequenceid=14, filesize=11.7 K 2024-12-16T17:56:38,176 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~67.09 KB/68700, heapSize ~176.48 KB/180720, currentSize=134.18 KB/137400 for 99318ad6c4e7b8782230d738424ff705 in 1250ms, sequenceid=14, compaction requested=false 2024-12-16T17:56:38,176 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 99318ad6c4e7b8782230d738424ff705: 2024-12-16T17:56:38,191 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 99318ad6c4e7b8782230d738424ff705 3/3 column families, dataSize=140.89 KB heapSize=369.89 KB 2024-12-16T17:56:38,191 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 99318ad6c4e7b8782230d738424ff705, store=A 2024-12-16T17:56:38,192 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-16T17:56:38,192 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 99318ad6c4e7b8782230d738424ff705, store=B 2024-12-16T17:56:38,192 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-16T17:56:38,192 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 99318ad6c4e7b8782230d738424ff705, store=C 2024-12-16T17:56:38,192 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-16T17:56:38,193 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39733 {}] regionserver.HRegion(8581): Flush requested on 99318ad6c4e7b8782230d738424ff705 2024-12-16T17:56:38,204 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/99318ad6c4e7b8782230d738424ff705/.tmp/A/ec695928b4634e2e94c6f1c27cfe61be is 50, key is test_row_0/A:col10/1734371798188/Put/seqid=0 2024-12-16T17:56:38,260 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41817 is added to blk_1073741842_1018 (size=14341) 2024-12-16T17:56:38,262 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=46.96 KB at sequenceid=38 (bloomFilter=true), to=hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/99318ad6c4e7b8782230d738424ff705/.tmp/A/ec695928b4634e2e94c6f1c27cfe61be 2024-12-16T17:56:38,279 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39733 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=99318ad6c4e7b8782230d738424ff705, server=3609ad07831c,39733,1734371789085 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-16T17:56:38,280 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39733 {}] ipc.CallRunner(138): callId: 17 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40334 deadline: 1734371858259, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=99318ad6c4e7b8782230d738424ff705, server=3609ad07831c,39733,1734371789085 2024-12-16T17:56:38,282 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39733 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=99318ad6c4e7b8782230d738424ff705, server=3609ad07831c,39733,1734371789085 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-16T17:56:38,282 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39733 {}] ipc.CallRunner(138): callId: 17 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40298 deadline: 1734371858259, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=99318ad6c4e7b8782230d738424ff705, server=3609ad07831c,39733,1734371789085 2024-12-16T17:56:38,283 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39733 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=99318ad6c4e7b8782230d738424ff705, server=3609ad07831c,39733,1734371789085 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-16T17:56:38,283 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39733 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=99318ad6c4e7b8782230d738424ff705, server=3609ad07831c,39733,1734371789085 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-16T17:56:38,283 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39733 {}] ipc.CallRunner(138): callId: 16 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40296 deadline: 1734371858261, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=99318ad6c4e7b8782230d738424ff705, server=3609ad07831c,39733,1734371789085 2024-12-16T17:56:38,283 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39733 {}] ipc.CallRunner(138): callId: 18 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40284 deadline: 1734371858264, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=99318ad6c4e7b8782230d738424ff705, server=3609ad07831c,39733,1734371789085 2024-12-16T17:56:38,295 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/99318ad6c4e7b8782230d738424ff705/.tmp/B/daede70eb63045e6a7abb121943d3e21 is 50, key is test_row_0/B:col10/1734371798188/Put/seqid=0 2024-12-16T17:56:38,299 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39733 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=99318ad6c4e7b8782230d738424ff705, server=3609ad07831c,39733,1734371789085 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-16T17:56:38,299 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39733 {}] ipc.CallRunner(138): callId: 17 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40322 deadline: 1734371858279, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=99318ad6c4e7b8782230d738424ff705, server=3609ad07831c,39733,1734371789085 2024-12-16T17:56:38,317 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41817 is added to blk_1073741843_1019 (size=12001) 2024-12-16T17:56:38,318 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=46.96 KB at sequenceid=38 (bloomFilter=true), to=hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/99318ad6c4e7b8782230d738424ff705/.tmp/B/daede70eb63045e6a7abb121943d3e21 2024-12-16T17:56:38,324 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 3609ad07831c,39733,1734371789085 2024-12-16T17:56:38,324 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=39733 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=13 2024-12-16T17:56:38,325 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-2 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1734371794518.99318ad6c4e7b8782230d738424ff705. 2024-12-16T17:56:38,325 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-2 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1734371794518.99318ad6c4e7b8782230d738424ff705. as already flushing 2024-12-16T17:56:38,325 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-2 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1734371794518.99318ad6c4e7b8782230d738424ff705. 2024-12-16T17:56:38,325 ERROR [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-2 {event_type=RS_FLUSH_REGIONS, pid=13}] handler.RSProcedureHandler(58): pid=13 java.io.IOException: Unable to complete flush {ENCODED => 99318ad6c4e7b8782230d738424ff705, NAME => 'TestAcidGuarantees,,1734371794518.99318ad6c4e7b8782230d738424ff705.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] 
at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-16T17:56:38,325 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-2 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=13 java.io.IOException: Unable to complete flush {ENCODED => 99318ad6c4e7b8782230d738424ff705, NAME => 'TestAcidGuarantees,,1734371794518.99318ad6c4e7b8782230d738424ff705.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-16T17:56:38,326 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38367 {}] master.HMaster(4114): Remote procedure failed, pid=13 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 99318ad6c4e7b8782230d738424ff705, NAME => 'TestAcidGuarantees,,1734371794518.99318ad6c4e7b8782230d738424ff705.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 99318ad6c4e7b8782230d738424ff705, NAME => 'TestAcidGuarantees,,1734371794518.99318ad6c4e7b8782230d738424ff705.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-16T17:56:38,353 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/99318ad6c4e7b8782230d738424ff705/.tmp/C/84751958ddbb492fb1c2096effdef643 is 50, key is test_row_0/C:col10/1734371798188/Put/seqid=0 2024-12-16T17:56:38,370 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41817 is added to blk_1073741844_1020 (size=12001) 2024-12-16T17:56:38,389 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39733 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=99318ad6c4e7b8782230d738424ff705, server=3609ad07831c,39733,1734371789085 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-16T17:56:38,390 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39733 {}] ipc.CallRunner(138): callId: 19 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40334 deadline: 1734371858384, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=99318ad6c4e7b8782230d738424ff705, server=3609ad07831c,39733,1734371789085 2024-12-16T17:56:38,390 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39733 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=99318ad6c4e7b8782230d738424ff705, server=3609ad07831c,39733,1734371789085 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-16T17:56:38,391 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39733 {}] ipc.CallRunner(138): callId: 19 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40298 deadline: 1734371858385, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=99318ad6c4e7b8782230d738424ff705, server=3609ad07831c,39733,1734371789085 2024-12-16T17:56:38,392 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39733 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=99318ad6c4e7b8782230d738424ff705, server=3609ad07831c,39733,1734371789085 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-16T17:56:38,392 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39733 {}] ipc.CallRunner(138): callId: 20 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40284 deadline: 1734371858387, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=99318ad6c4e7b8782230d738424ff705, server=3609ad07831c,39733,1734371789085 2024-12-16T17:56:38,392 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39733 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=99318ad6c4e7b8782230d738424ff705, server=3609ad07831c,39733,1734371789085 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-16T17:56:38,392 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39733 {}] ipc.CallRunner(138): callId: 18 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40296 deadline: 1734371858387, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=99318ad6c4e7b8782230d738424ff705, server=3609ad07831c,39733,1734371789085 2024-12-16T17:56:38,405 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39733 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=99318ad6c4e7b8782230d738424ff705, server=3609ad07831c,39733,1734371789085 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-16T17:56:38,405 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39733 {}] ipc.CallRunner(138): callId: 19 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40322 deadline: 1734371858402, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=99318ad6c4e7b8782230d738424ff705, server=3609ad07831c,39733,1734371789085 2024-12-16T17:56:38,478 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 3609ad07831c,39733,1734371789085 2024-12-16T17:56:38,479 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=39733 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=13 2024-12-16T17:56:38,479 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-0 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1734371794518.99318ad6c4e7b8782230d738424ff705. 2024-12-16T17:56:38,479 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-0 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1734371794518.99318ad6c4e7b8782230d738424ff705. as already flushing 2024-12-16T17:56:38,479 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-0 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1734371794518.99318ad6c4e7b8782230d738424ff705. 2024-12-16T17:56:38,479 ERROR [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-0 {event_type=RS_FLUSH_REGIONS, pid=13}] handler.RSProcedureHandler(58): pid=13 java.io.IOException: Unable to complete flush {ENCODED => 99318ad6c4e7b8782230d738424ff705, NAME => 'TestAcidGuarantees,,1734371794518.99318ad6c4e7b8782230d738424ff705.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-16T17:56:38,480 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-0 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=13 java.io.IOException: Unable to complete flush {ENCODED => 99318ad6c4e7b8782230d738424ff705, NAME => 'TestAcidGuarantees,,1734371794518.99318ad6c4e7b8782230d738424ff705.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-16T17:56:38,480 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38367 {}] master.HMaster(4114): Remote procedure failed, pid=13 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 99318ad6c4e7b8782230d738424ff705, NAME => 'TestAcidGuarantees,,1734371794518.99318ad6c4e7b8782230d738424ff705.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 99318ad6c4e7b8782230d738424ff705, NAME => 'TestAcidGuarantees,,1734371794518.99318ad6c4e7b8782230d738424ff705.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-16T17:56:38,594 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39733 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=99318ad6c4e7b8782230d738424ff705, server=3609ad07831c,39733,1734371789085 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-16T17:56:38,595 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39733 {}] ipc.CallRunner(138): callId: 21 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40334 deadline: 1734371858592, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=99318ad6c4e7b8782230d738424ff705, server=3609ad07831c,39733,1734371789085 2024-12-16T17:56:38,598 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39733 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=99318ad6c4e7b8782230d738424ff705, server=3609ad07831c,39733,1734371789085 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-16T17:56:38,598 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39733 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=99318ad6c4e7b8782230d738424ff705, server=3609ad07831c,39733,1734371789085 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-16T17:56:38,598 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39733 {}] ipc.CallRunner(138): callId: 22 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40284 deadline: 1734371858596, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=99318ad6c4e7b8782230d738424ff705, server=3609ad07831c,39733,1734371789085 2024-12-16T17:56:38,598 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39733 {}] ipc.CallRunner(138): callId: 21 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40298 deadline: 1734371858595, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=99318ad6c4e7b8782230d738424ff705, server=3609ad07831c,39733,1734371789085 2024-12-16T17:56:38,599 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39733 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=99318ad6c4e7b8782230d738424ff705, server=3609ad07831c,39733,1734371789085 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-16T17:56:38,599 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39733 {}] ipc.CallRunner(138): callId: 20 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40296 deadline: 1734371858597, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=99318ad6c4e7b8782230d738424ff705, server=3609ad07831c,39733,1734371789085 2024-12-16T17:56:38,611 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39733 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=99318ad6c4e7b8782230d738424ff705, server=3609ad07831c,39733,1734371789085 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-16T17:56:38,611 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39733 {}] ipc.CallRunner(138): callId: 21 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40322 deadline: 1734371858608, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=99318ad6c4e7b8782230d738424ff705, server=3609ad07831c,39733,1734371789085 2024-12-16T17:56:38,632 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 3609ad07831c,39733,1734371789085 2024-12-16T17:56:38,632 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=39733 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=13 2024-12-16T17:56:38,633 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-1 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1734371794518.99318ad6c4e7b8782230d738424ff705. 2024-12-16T17:56:38,633 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-1 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1734371794518.99318ad6c4e7b8782230d738424ff705. as already flushing 2024-12-16T17:56:38,633 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-1 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1734371794518.99318ad6c4e7b8782230d738424ff705. 2024-12-16T17:56:38,633 ERROR [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-1 {event_type=RS_FLUSH_REGIONS, pid=13}] handler.RSProcedureHandler(58): pid=13 java.io.IOException: Unable to complete flush {ENCODED => 99318ad6c4e7b8782230d738424ff705, NAME => 'TestAcidGuarantees,,1734371794518.99318ad6c4e7b8782230d738424ff705.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-16T17:56:38,633 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-1 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=13 java.io.IOException: Unable to complete flush {ENCODED => 99318ad6c4e7b8782230d738424ff705, NAME => 'TestAcidGuarantees,,1734371794518.99318ad6c4e7b8782230d738424ff705.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-16T17:56:38,634 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38367 {}] master.HMaster(4114): Remote procedure failed, pid=13 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 99318ad6c4e7b8782230d738424ff705, NAME => 'TestAcidGuarantees,,1734371794518.99318ad6c4e7b8782230d738424ff705.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 99318ad6c4e7b8782230d738424ff705, NAME => 'TestAcidGuarantees,,1734371794518.99318ad6c4e7b8782230d738424ff705.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-16T17:56:38,659 WARN [HBase-Metrics2-1 {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-hbase.properties,hadoop-metrics2.properties 2024-12-16T17:56:38,762 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_hbase_table_meta 2024-12-16T17:56:38,763 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=TableRequests_Namespace_hbase_table_meta Metrics about Tables on a single HBase RegionServer 2024-12-16T17:56:38,764 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_hbase_table_namespace 2024-12-16T17:56:38,765 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=TableRequests_Namespace_hbase_table_namespace Metrics about Tables on a single HBase RegionServer 2024-12-16T17:56:38,766 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=Coprocessor.Region.CP_org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint 2024-12-16T17:56:38,766 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=Coprocessor.Region.CP_org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint Metrics about HBase RegionObservers 2024-12-16T17:56:38,767 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_master_table_store 2024-12-16T17:56:38,767 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=TableRequests_Namespace_master_table_store Metrics about Tables on a single HBase RegionServer 2024-12-16T17:56:38,770 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_default_table_TestAcidGuarantees 2024-12-16T17:56:38,770 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=TableRequests_Namespace_default_table_TestAcidGuarantees Metrics about Tables on a single HBase RegionServer 2024-12-16T17:56:38,772 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=46.96 KB at sequenceid=38 (bloomFilter=true), to=hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/99318ad6c4e7b8782230d738424ff705/.tmp/C/84751958ddbb492fb1c2096effdef643 2024-12-16T17:56:38,786 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 3609ad07831c,39733,1734371789085 2024-12-16T17:56:38,789 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=39733 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=13 2024-12-16T17:56:38,789 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-2 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1734371794518.99318ad6c4e7b8782230d738424ff705. 
2024-12-16T17:56:38,789 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-2 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1734371794518.99318ad6c4e7b8782230d738424ff705. as already flushing 2024-12-16T17:56:38,789 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-2 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1734371794518.99318ad6c4e7b8782230d738424ff705. 2024-12-16T17:56:38,789 ERROR [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-2 {event_type=RS_FLUSH_REGIONS, pid=13}] handler.RSProcedureHandler(58): pid=13 java.io.IOException: Unable to complete flush {ENCODED => 99318ad6c4e7b8782230d738424ff705, NAME => 'TestAcidGuarantees,,1734371794518.99318ad6c4e7b8782230d738424ff705.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-16T17:56:38,790 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-2 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=13 java.io.IOException: Unable to complete flush {ENCODED => 99318ad6c4e7b8782230d738424ff705, NAME => 'TestAcidGuarantees,,1734371794518.99318ad6c4e7b8782230d738424ff705.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-16T17:56:38,791 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38367 {}] master.HMaster(4114): Remote procedure failed, pid=13 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 99318ad6c4e7b8782230d738424ff705, NAME => 'TestAcidGuarantees,,1734371794518.99318ad6c4e7b8782230d738424ff705.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 99318ad6c4e7b8782230d738424ff705, NAME => 'TestAcidGuarantees,,1734371794518.99318ad6c4e7b8782230d738424ff705.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-16T17:56:38,792 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/99318ad6c4e7b8782230d738424ff705/.tmp/A/ec695928b4634e2e94c6f1c27cfe61be as hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/99318ad6c4e7b8782230d738424ff705/A/ec695928b4634e2e94c6f1c27cfe61be 2024-12-16T17:56:38,809 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/99318ad6c4e7b8782230d738424ff705/A/ec695928b4634e2e94c6f1c27cfe61be, entries=200, sequenceid=38, filesize=14.0 K 2024-12-16T17:56:38,815 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/99318ad6c4e7b8782230d738424ff705/.tmp/B/daede70eb63045e6a7abb121943d3e21 as hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/99318ad6c4e7b8782230d738424ff705/B/daede70eb63045e6a7abb121943d3e21 2024-12-16T17:56:38,829 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/99318ad6c4e7b8782230d738424ff705/B/daede70eb63045e6a7abb121943d3e21, entries=150, sequenceid=38, filesize=11.7 K 2024-12-16T17:56:38,836 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/99318ad6c4e7b8782230d738424ff705/.tmp/C/84751958ddbb492fb1c2096effdef643 as hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/99318ad6c4e7b8782230d738424ff705/C/84751958ddbb492fb1c2096effdef643 2024-12-16T17:56:38,854 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/99318ad6c4e7b8782230d738424ff705/C/84751958ddbb492fb1c2096effdef643, entries=150, sequenceid=38, filesize=11.7 K 2024-12-16T17:56:38,856 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~140.89 KB/144270, heapSize ~369.84 KB/378720, currentSize=60.38 KB/61830 for 99318ad6c4e7b8782230d738424ff705 in 664ms, sequenceid=38, compaction requested=false 2024-12-16T17:56:38,856 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 99318ad6c4e7b8782230d738424ff705: 2024-12-16T17:56:38,904 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39733 {}] regionserver.HRegion(8581): Flush requested on 99318ad6c4e7b8782230d738424ff705 2024-12-16T17:56:38,904 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 99318ad6c4e7b8782230d738424ff705 3/3 column families, dataSize=67.09 KB heapSize=176.53 KB 2024-12-16T17:56:38,905 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 99318ad6c4e7b8782230d738424ff705, store=A 2024-12-16T17:56:38,905 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-16T17:56:38,905 DEBUG [MemStoreFlusher.0 {}] 
regionserver.CompactingMemStore(205): FLUSHING TO DISK 99318ad6c4e7b8782230d738424ff705, store=B 2024-12-16T17:56:38,905 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-16T17:56:38,906 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 99318ad6c4e7b8782230d738424ff705, store=C 2024-12-16T17:56:38,906 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-16T17:56:38,927 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/99318ad6c4e7b8782230d738424ff705/.tmp/A/088927053dee47739704dacde7207c95 is 50, key is test_row_0/A:col10/1734371798260/Put/seqid=0 2024-12-16T17:56:38,943 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 3609ad07831c,39733,1734371789085 2024-12-16T17:56:38,944 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=39733 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=13 2024-12-16T17:56:38,945 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-0 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1734371794518.99318ad6c4e7b8782230d738424ff705. 2024-12-16T17:56:38,945 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-0 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1734371794518.99318ad6c4e7b8782230d738424ff705. as already flushing 2024-12-16T17:56:38,945 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-0 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1734371794518.99318ad6c4e7b8782230d738424ff705. 2024-12-16T17:56:38,945 ERROR [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-0 {event_type=RS_FLUSH_REGIONS, pid=13}] handler.RSProcedureHandler(58): pid=13 java.io.IOException: Unable to complete flush {ENCODED => 99318ad6c4e7b8782230d738424ff705, NAME => 'TestAcidGuarantees,,1734371794518.99318ad6c4e7b8782230d738424ff705.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-16T17:56:38,945 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-0 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=13 java.io.IOException: Unable to complete flush {ENCODED => 99318ad6c4e7b8782230d738424ff705, NAME => 'TestAcidGuarantees,,1734371794518.99318ad6c4e7b8782230d738424ff705.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-16T17:56:38,947 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38367 {}] master.HMaster(4114): Remote procedure failed, pid=13 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 99318ad6c4e7b8782230d738424ff705, NAME => 'TestAcidGuarantees,,1734371794518.99318ad6c4e7b8782230d738424ff705.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 99318ad6c4e7b8782230d738424ff705, NAME => 'TestAcidGuarantees,,1734371794518.99318ad6c4e7b8782230d738424ff705.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-16T17:56:38,955 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39733 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=99318ad6c4e7b8782230d738424ff705, server=3609ad07831c,39733,1734371789085 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-16T17:56:38,955 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39733 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=99318ad6c4e7b8782230d738424ff705, server=3609ad07831c,39733,1734371789085 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-16T17:56:38,956 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39733 {}] ipc.CallRunner(138): callId: 28 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40334 deadline: 1734371858948, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=99318ad6c4e7b8782230d738424ff705, server=3609ad07831c,39733,1734371789085 2024-12-16T17:56:38,956 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39733 {}] ipc.CallRunner(138): callId: 28 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40284 deadline: 1734371858946, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=99318ad6c4e7b8782230d738424ff705, server=3609ad07831c,39733,1734371789085 2024-12-16T17:56:38,960 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39733 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=99318ad6c4e7b8782230d738424ff705, server=3609ad07831c,39733,1734371789085 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-16T17:56:38,961 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39733 {}] ipc.CallRunner(138): callId: 26 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40322 deadline: 1734371858954, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=99318ad6c4e7b8782230d738424ff705, server=3609ad07831c,39733,1734371789085 2024-12-16T17:56:38,961 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39733 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=99318ad6c4e7b8782230d738424ff705, server=3609ad07831c,39733,1734371789085 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-16T17:56:38,962 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39733 {}] ipc.CallRunner(138): callId: 26 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40296 deadline: 1734371858956, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=99318ad6c4e7b8782230d738424ff705, server=3609ad07831c,39733,1734371789085 2024-12-16T17:56:38,963 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39733 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=99318ad6c4e7b8782230d738424ff705, server=3609ad07831c,39733,1734371789085 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-16T17:56:38,963 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39733 {}] ipc.CallRunner(138): callId: 28 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40298 deadline: 1734371858956, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=99318ad6c4e7b8782230d738424ff705, server=3609ad07831c,39733,1734371789085 2024-12-16T17:56:38,968 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41817 is added to blk_1073741845_1021 (size=12001) 2024-12-16T17:56:38,975 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38367 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=12 2024-12-16T17:56:39,060 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39733 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=99318ad6c4e7b8782230d738424ff705, server=3609ad07831c,39733,1734371789085 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-16T17:56:39,060 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39733 {}] ipc.CallRunner(138): callId: 30 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40334 deadline: 1734371859058, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=99318ad6c4e7b8782230d738424ff705, server=3609ad07831c,39733,1734371789085 2024-12-16T17:56:39,061 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39733 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=99318ad6c4e7b8782230d738424ff705, server=3609ad07831c,39733,1734371789085 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-16T17:56:39,061 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39733 {}] ipc.CallRunner(138): callId: 30 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40284 deadline: 1734371859058, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=99318ad6c4e7b8782230d738424ff705, server=3609ad07831c,39733,1734371789085 2024-12-16T17:56:39,064 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39733 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=99318ad6c4e7b8782230d738424ff705, server=3609ad07831c,39733,1734371789085 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-16T17:56:39,064 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39733 {}] ipc.CallRunner(138): callId: 28 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40322 deadline: 1734371859063, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=99318ad6c4e7b8782230d738424ff705, server=3609ad07831c,39733,1734371789085 2024-12-16T17:56:39,067 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39733 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=99318ad6c4e7b8782230d738424ff705, server=3609ad07831c,39733,1734371789085 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-16T17:56:39,067 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39733 {}] ipc.CallRunner(138): callId: 28 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40296 deadline: 1734371859065, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=99318ad6c4e7b8782230d738424ff705, server=3609ad07831c,39733,1734371789085 2024-12-16T17:56:39,069 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39733 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=99318ad6c4e7b8782230d738424ff705, server=3609ad07831c,39733,1734371789085 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-16T17:56:39,070 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39733 {}] ipc.CallRunner(138): callId: 30 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40298 deadline: 1734371859065, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=99318ad6c4e7b8782230d738424ff705, server=3609ad07831c,39733,1734371789085 2024-12-16T17:56:39,101 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 3609ad07831c,39733,1734371789085 2024-12-16T17:56:39,101 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=39733 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=13 2024-12-16T17:56:39,101 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-1 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1734371794518.99318ad6c4e7b8782230d738424ff705. 2024-12-16T17:56:39,102 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-1 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1734371794518.99318ad6c4e7b8782230d738424ff705. as already flushing 2024-12-16T17:56:39,102 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-1 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1734371794518.99318ad6c4e7b8782230d738424ff705. 2024-12-16T17:56:39,102 ERROR [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-1 {event_type=RS_FLUSH_REGIONS, pid=13}] handler.RSProcedureHandler(58): pid=13 java.io.IOException: Unable to complete flush {ENCODED => 99318ad6c4e7b8782230d738424ff705, NAME => 'TestAcidGuarantees,,1734371794518.99318ad6c4e7b8782230d738424ff705.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] 
at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-16T17:56:39,102 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-1 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=13 java.io.IOException: Unable to complete flush {ENCODED => 99318ad6c4e7b8782230d738424ff705, NAME => 'TestAcidGuarantees,,1734371794518.99318ad6c4e7b8782230d738424ff705.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-16T17:56:39,103 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38367 {}] master.HMaster(4114): Remote procedure failed, pid=13 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 99318ad6c4e7b8782230d738424ff705, NAME => 'TestAcidGuarantees,,1734371794518.99318ad6c4e7b8782230d738424ff705.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 99318ad6c4e7b8782230d738424ff705, NAME => 'TestAcidGuarantees,,1734371794518.99318ad6c4e7b8782230d738424ff705.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-16T17:56:39,256 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 3609ad07831c,39733,1734371789085 2024-12-16T17:56:39,257 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=39733 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=13 2024-12-16T17:56:39,257 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-2 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1734371794518.99318ad6c4e7b8782230d738424ff705. 2024-12-16T17:56:39,257 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-2 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1734371794518.99318ad6c4e7b8782230d738424ff705. as already flushing 2024-12-16T17:56:39,257 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-2 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1734371794518.99318ad6c4e7b8782230d738424ff705. 2024-12-16T17:56:39,257 ERROR [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-2 {event_type=RS_FLUSH_REGIONS, pid=13}] handler.RSProcedureHandler(58): pid=13 java.io.IOException: Unable to complete flush {ENCODED => 99318ad6c4e7b8782230d738424ff705, NAME => 'TestAcidGuarantees,,1734371794518.99318ad6c4e7b8782230d738424ff705.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-16T17:56:39,258 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-2 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=13 java.io.IOException: Unable to complete flush {ENCODED => 99318ad6c4e7b8782230d738424ff705, NAME => 'TestAcidGuarantees,,1734371794518.99318ad6c4e7b8782230d738424ff705.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-16T17:56:39,258 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38367 {}] master.HMaster(4114): Remote procedure failed, pid=13 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 99318ad6c4e7b8782230d738424ff705, NAME => 'TestAcidGuarantees,,1734371794518.99318ad6c4e7b8782230d738424ff705.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 99318ad6c4e7b8782230d738424ff705, NAME => 'TestAcidGuarantees,,1734371794518.99318ad6c4e7b8782230d738424ff705.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-16T17:56:39,264 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39733 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=99318ad6c4e7b8782230d738424ff705, server=3609ad07831c,39733,1734371789085 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-16T17:56:39,264 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39733 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=99318ad6c4e7b8782230d738424ff705, server=3609ad07831c,39733,1734371789085 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-16T17:56:39,265 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39733 {}] ipc.CallRunner(138): callId: 32 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40334 deadline: 1734371859263, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=99318ad6c4e7b8782230d738424ff705, server=3609ad07831c,39733,1734371789085 2024-12-16T17:56:39,265 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39733 {}] ipc.CallRunner(138): callId: 32 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40284 deadline: 1734371859263, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=99318ad6c4e7b8782230d738424ff705, server=3609ad07831c,39733,1734371789085 2024-12-16T17:56:39,267 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39733 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=99318ad6c4e7b8782230d738424ff705, server=3609ad07831c,39733,1734371789085 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-16T17:56:39,268 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39733 {}] ipc.CallRunner(138): callId: 30 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40322 deadline: 1734371859266, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=99318ad6c4e7b8782230d738424ff705, server=3609ad07831c,39733,1734371789085 2024-12-16T17:56:39,272 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39733 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=99318ad6c4e7b8782230d738424ff705, server=3609ad07831c,39733,1734371789085 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-16T17:56:39,273 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39733 {}] ipc.CallRunner(138): callId: 30 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40296 deadline: 1734371859269, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=99318ad6c4e7b8782230d738424ff705, server=3609ad07831c,39733,1734371789085 2024-12-16T17:56:39,274 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39733 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=99318ad6c4e7b8782230d738424ff705, server=3609ad07831c,39733,1734371789085 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-16T17:56:39,275 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39733 {}] ipc.CallRunner(138): callId: 32 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40298 deadline: 1734371859273, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=99318ad6c4e7b8782230d738424ff705, server=3609ad07831c,39733,1734371789085 2024-12-16T17:56:39,370 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=22.36 KB at sequenceid=51 (bloomFilter=true), to=hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/99318ad6c4e7b8782230d738424ff705/.tmp/A/088927053dee47739704dacde7207c95 2024-12-16T17:56:39,390 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/99318ad6c4e7b8782230d738424ff705/.tmp/B/43907416581c4567bfdf14e9de60b53c is 50, key is test_row_0/B:col10/1734371798260/Put/seqid=0 2024-12-16T17:56:39,407 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41817 is added to blk_1073741846_1022 (size=12001) 2024-12-16T17:56:39,409 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=22.36 KB at sequenceid=51 (bloomFilter=true), to=hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/99318ad6c4e7b8782230d738424ff705/.tmp/B/43907416581c4567bfdf14e9de60b53c 2024-12-16T17:56:39,411 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 3609ad07831c,39733,1734371789085 2024-12-16T17:56:39,412 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=39733 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=13 2024-12-16T17:56:39,413 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-0 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1734371794518.99318ad6c4e7b8782230d738424ff705. 2024-12-16T17:56:39,413 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-0 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1734371794518.99318ad6c4e7b8782230d738424ff705. as already flushing 2024-12-16T17:56:39,413 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-0 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1734371794518.99318ad6c4e7b8782230d738424ff705. 
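Annotation: the repeated RegionTooBusyException responses above are the region server refusing new Mutate calls while the memstore of region 99318ad6c4e7b8782230d738424ff705 sits over its 512.0 K blocking limit and a flush is still in flight. The HBase client normally absorbs this by retrying internally, so the sketch below is only an illustration of explicit backoff around a single put. It reuses the table, row, family, and qualifier names visible in this log (TestAcidGuarantees, test_row_0, A, col10); the value, attempt count, and backoff are assumptions.

```java
import java.io.IOException;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.RegionTooBusyException;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.util.Bytes;

public class BusyRegionPutSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    try (Connection conn = ConnectionFactory.createConnection(conf);
         Table table = conn.getTable(TableName.valueOf("TestAcidGuarantees"))) {
      Put put = new Put(Bytes.toBytes("test_row_0"));
      put.addColumn(Bytes.toBytes("A"), Bytes.toBytes("col10"), Bytes.toBytes("value"));

      long backoffMs = 100;
      int maxAttempts = 5;
      for (int attempt = 1; attempt <= maxAttempts; attempt++) {
        try {
          // Rejected with RegionTooBusyException while the memstore is over its blocking limit.
          table.put(put);
          break;
        } catch (IOException e) {
          // The busy signal may arrive directly or wrapped once the client's own retries give up.
          boolean busy = e instanceof RegionTooBusyException
              || e.getCause() instanceof RegionTooBusyException;
          if (!busy || attempt == maxAttempts) {
            throw e;
          }
          Thread.sleep(backoffMs); // give the in-flight flush time to drain the memstore
          backoffMs *= 2;
        }
      }
    }
  }
}
```

Once the flush logged at 17:56:39,370-546 below commits its store files and frees memstore space, the same put would be expected to go through without any of this.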
2024-12-16T17:56:39,413 ERROR [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-0 {event_type=RS_FLUSH_REGIONS, pid=13}] handler.RSProcedureHandler(58): pid=13 java.io.IOException: Unable to complete flush {ENCODED => 99318ad6c4e7b8782230d738424ff705, NAME => 'TestAcidGuarantees,,1734371794518.99318ad6c4e7b8782230d738424ff705.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-16T17:56:39,414 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-0 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=13 java.io.IOException: Unable to complete flush {ENCODED => 99318ad6c4e7b8782230d738424ff705, NAME => 'TestAcidGuarantees,,1734371794518.99318ad6c4e7b8782230d738424ff705.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-16T17:56:39,415 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38367 {}] master.HMaster(4114): Remote procedure failed, pid=13 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 99318ad6c4e7b8782230d738424ff705, NAME => 'TestAcidGuarantees,,1734371794518.99318ad6c4e7b8782230d738424ff705.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 99318ad6c4e7b8782230d738424ff705, NAME => 'TestAcidGuarantees,,1734371794518.99318ad6c4e7b8782230d738424ff705.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-16T17:56:39,435 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/99318ad6c4e7b8782230d738424ff705/.tmp/C/93eb40b7206b4047928b9bfdb2a7024a is 50, key is test_row_0/C:col10/1734371798260/Put/seqid=0 2024-12-16T17:56:39,459 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41817 is added to blk_1073741847_1023 (size=12001) 2024-12-16T17:56:39,465 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=22.36 KB at sequenceid=51 (bloomFilter=true), to=hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/99318ad6c4e7b8782230d738424ff705/.tmp/C/93eb40b7206b4047928b9bfdb2a7024a 2024-12-16T17:56:39,482 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/99318ad6c4e7b8782230d738424ff705/.tmp/A/088927053dee47739704dacde7207c95 as hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/99318ad6c4e7b8782230d738424ff705/A/088927053dee47739704dacde7207c95 2024-12-16T17:56:39,499 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/99318ad6c4e7b8782230d738424ff705/A/088927053dee47739704dacde7207c95, entries=150, sequenceid=51, filesize=11.7 K 2024-12-16T17:56:39,501 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing 
hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/99318ad6c4e7b8782230d738424ff705/.tmp/B/43907416581c4567bfdf14e9de60b53c as hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/99318ad6c4e7b8782230d738424ff705/B/43907416581c4567bfdf14e9de60b53c 2024-12-16T17:56:39,524 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/99318ad6c4e7b8782230d738424ff705/B/43907416581c4567bfdf14e9de60b53c, entries=150, sequenceid=51, filesize=11.7 K 2024-12-16T17:56:39,527 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/99318ad6c4e7b8782230d738424ff705/.tmp/C/93eb40b7206b4047928b9bfdb2a7024a as hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/99318ad6c4e7b8782230d738424ff705/C/93eb40b7206b4047928b9bfdb2a7024a 2024-12-16T17:56:39,542 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/99318ad6c4e7b8782230d738424ff705/C/93eb40b7206b4047928b9bfdb2a7024a, entries=150, sequenceid=51, filesize=11.7 K 2024-12-16T17:56:39,546 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~67.09 KB/68700, heapSize ~176.48 KB/180720, currentSize=134.18 KB/137400 for 99318ad6c4e7b8782230d738424ff705 in 641ms, sequenceid=51, compaction requested=true 2024-12-16T17:56:39,546 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 99318ad6c4e7b8782230d738424ff705: 2024-12-16T17:56:39,564 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 99318ad6c4e7b8782230d738424ff705:A, priority=-2147483648, current under compaction store size is 1 2024-12-16T17:56:39,564 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-16T17:56:39,565 DEBUG [RS:0;3609ad07831c:39733-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-16T17:56:39,567 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 99318ad6c4e7b8782230d738424ff705:B, priority=-2147483648, current under compaction store size is 2 2024-12-16T17:56:39,567 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-16T17:56:39,567 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 3609ad07831c,39733,1734371789085 2024-12-16T17:56:39,567 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 99318ad6c4e7b8782230d738424ff705:C, priority=-2147483648, current under compaction store size is 3 2024-12-16T17:56:39,568 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 
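Annotation: every "Over memstore limit=512.0 K" rejection in this run comes from HRegion.checkResources, which blocks writes once the region's memstore exceeds hbase.hregion.memstore.flush.size multiplied by hbase.hregion.memstore.block.multiplier (the flush size may also come from the table descriptor). The 512.0 K figure is clearly a test-scale limit; the excerpt does not show the underlying values, so the combination below is only an assumed example that reproduces the same product.

```java
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;

public class BlockingMemstoreLimitSketch {
  public static void main(String[] args) {
    Configuration conf = HBaseConfiguration.create();
    // Assumed test-scale values; the log only tells us their product is 512 K.
    conf.setLong("hbase.hregion.memstore.flush.size", 128 * 1024); // flush each memstore at 128 KB
    conf.setInt("hbase.hregion.memstore.block.multiplier", 4);     // block writes at 4x the flush size

    long blockingLimit = conf.getLong("hbase.hregion.memstore.flush.size", 0)
        * conf.getInt("hbase.hregion.memstore.block.multiplier", 4);
    System.out.println(blockingLimit); // 524288 bytes = 512.0 K, matching checkResources() above
  }
}
```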
2024-12-16T17:56:39,569 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=39733 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=13 2024-12-16T17:56:39,569 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-1 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1734371794518.99318ad6c4e7b8782230d738424ff705. 2024-12-16T17:56:39,569 INFO [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-1 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.HRegion(2837): Flushing 99318ad6c4e7b8782230d738424ff705 3/3 column families, dataSize=134.18 KB heapSize=352.31 KB 2024-12-16T17:56:39,570 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-1 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 99318ad6c4e7b8782230d738424ff705, store=A 2024-12-16T17:56:39,570 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-1 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-16T17:56:39,570 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-1 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 99318ad6c4e7b8782230d738424ff705, store=B 2024-12-16T17:56:39,570 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-1 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-16T17:56:39,570 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-1 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 99318ad6c4e7b8782230d738424ff705, store=C 2024-12-16T17:56:39,570 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-1 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-16T17:56:39,572 DEBUG [RS:0;3609ad07831c:39733-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-16T17:56:39,575 DEBUG [RS:0;3609ad07831c:39733-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 36003 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-16T17:56:39,576 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39733 {}] regionserver.HRegion(8581): Flush requested on 99318ad6c4e7b8782230d738424ff705 2024-12-16T17:56:39,577 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1734371794518.99318ad6c4e7b8782230d738424ff705. as already flushing 2024-12-16T17:56:39,578 DEBUG [RS:0;3609ad07831c:39733-longCompactions-0 {}] regionserver.HStore(1540): 99318ad6c4e7b8782230d738424ff705/B is initiating minor compaction (all files) 2024-12-16T17:56:39,578 INFO [RS:0;3609ad07831c:39733-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 99318ad6c4e7b8782230d738424ff705/B in TestAcidGuarantees,,1734371794518.99318ad6c4e7b8782230d738424ff705. 
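Annotation: the flush at 17:56:39,569 walks the region's three column families (A, B, C), snapshotting each CompactingMemStore in turn. The test harness creates this table through its own utilities, which are not visible in this log; purely as an illustrative sketch with the plain Admin API, a table of the same shape could be set up as below. The BASIC in-memory compaction policy is an inference from the CompactingMemStore lines, not something the log states.

```java
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.MemoryCompactionPolicy;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
import org.apache.hadoop.hbase.util.Bytes;

public class ThreeFamilyTableSketch {
  public static void main(String[] args) throws Exception {
    try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create());
         Admin admin = conn.getAdmin()) {
      TableDescriptorBuilder table =
          TableDescriptorBuilder.newBuilder(TableName.valueOf("TestAcidGuarantees"));
      for (String family : new String[] {"A", "B", "C"}) {
        table.setColumnFamily(ColumnFamilyDescriptorBuilder
            .newBuilder(Bytes.toBytes(family))
            // The CompactingMemStore entries above imply in-memory compaction is enabled.
            .setInMemoryCompaction(MemoryCompactionPolicy.BASIC)
            .build());
      }
      admin.createTable(table.build());
    }
  }
}
```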
2024-12-16T17:56:39,578 INFO [RS:0;3609ad07831c:39733-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/99318ad6c4e7b8782230d738424ff705/B/101adadc71214d0d82a7842965784165, hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/99318ad6c4e7b8782230d738424ff705/B/daede70eb63045e6a7abb121943d3e21, hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/99318ad6c4e7b8782230d738424ff705/B/43907416581c4567bfdf14e9de60b53c] into tmpdir=hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/99318ad6c4e7b8782230d738424ff705/.tmp, totalSize=35.2 K 2024-12-16T17:56:39,580 DEBUG [RS:0;3609ad07831c:39733-longCompactions-0 {}] compactions.Compactor(224): Compacting 101adadc71214d0d82a7842965784165, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=14, earliestPutTs=1734371796897 2024-12-16T17:56:39,581 DEBUG [RS:0;3609ad07831c:39733-longCompactions-0 {}] compactions.Compactor(224): Compacting daede70eb63045e6a7abb121943d3e21, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=38, earliestPutTs=1734371797014 2024-12-16T17:56:39,583 DEBUG [RS:0;3609ad07831c:39733-longCompactions-0 {}] compactions.Compactor(224): Compacting 43907416581c4567bfdf14e9de60b53c, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=51, earliestPutTs=1734371798224 2024-12-16T17:56:39,584 DEBUG [RS:0;3609ad07831c:39733-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 38343 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-16T17:56:39,585 DEBUG [RS:0;3609ad07831c:39733-shortCompactions-0 {}] regionserver.HStore(1540): 99318ad6c4e7b8782230d738424ff705/A is initiating minor compaction (all files) 2024-12-16T17:56:39,585 INFO [RS:0;3609ad07831c:39733-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 99318ad6c4e7b8782230d738424ff705/A in TestAcidGuarantees,,1734371794518.99318ad6c4e7b8782230d738424ff705. 
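Annotation: the selection math is consistent with the file sizes recorded earlier. The newest B store file (43907416...) is exactly 12001 bytes per the block report, and the total of 36003 bytes implies the other two 11.7 K files match it, giving the "3 files of size 36003" / "totalSize=35.2 K" reported by ExploringCompactionPolicy. Minor compaction fires here because the store reached three eligible files after the third flush, with 16 as the blocking store-file count. The sketch below writes out what appear to be the default thresholds behind the "3 eligible, 16 blocking" figures; treating them as defaults is an assumption, since the test's configuration is not shown.

```java
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;

public class CompactionThresholdSketch {
  public static void main(String[] args) {
    Configuration conf = HBaseConfiguration.create();
    // Defaults written out explicitly (assumed unchanged in this test run):
    conf.setInt("hbase.hstore.compaction.min", 3);      // a store becomes compaction-eligible at 3 files
    conf.setInt("hbase.hstore.blockingStoreFiles", 16); // the "16 blocking" figure in the selection log

    long[] storeFileBytes = {12001, 12001, 12001};      // the three B files flushed at seqids 14, 38, 51
    long total = 0;
    for (long bytes : storeFileBytes) {
      total += bytes;
    }
    System.out.println(total);                          // 36003 bytes, i.e. ~35.2 K as selected above
  }
}
```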
2024-12-16T17:56:39,586 INFO [RS:0;3609ad07831c:39733-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/99318ad6c4e7b8782230d738424ff705/A/27e28eaf7dfc4e9a848283467fc94f93, hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/99318ad6c4e7b8782230d738424ff705/A/ec695928b4634e2e94c6f1c27cfe61be, hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/99318ad6c4e7b8782230d738424ff705/A/088927053dee47739704dacde7207c95] into tmpdir=hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/99318ad6c4e7b8782230d738424ff705/.tmp, totalSize=37.4 K 2024-12-16T17:56:39,587 DEBUG [RS:0;3609ad07831c:39733-shortCompactions-0 {}] compactions.Compactor(224): Compacting 27e28eaf7dfc4e9a848283467fc94f93, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=14, earliestPutTs=1734371796897 2024-12-16T17:56:39,588 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-1 {event_type=RS_FLUSH_REGIONS, pid=13}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/99318ad6c4e7b8782230d738424ff705/.tmp/A/52be8eb4aef0458b9c56944ba033f8f7 is 50, key is test_row_0/A:col10/1734371798939/Put/seqid=0 2024-12-16T17:56:39,589 DEBUG [RS:0;3609ad07831c:39733-shortCompactions-0 {}] compactions.Compactor(224): Compacting ec695928b4634e2e94c6f1c27cfe61be, keycount=200, bloomtype=ROW, size=14.0 K, encoding=NONE, compression=NONE, seqNum=38, earliestPutTs=1734371797014 2024-12-16T17:56:39,591 DEBUG [RS:0;3609ad07831c:39733-shortCompactions-0 {}] compactions.Compactor(224): Compacting 088927053dee47739704dacde7207c95, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=51, earliestPutTs=1734371798224 2024-12-16T17:56:39,607 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39733 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=99318ad6c4e7b8782230d738424ff705, server=3609ad07831c,39733,1734371789085 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-16T17:56:39,608 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39733 {}] ipc.CallRunner(138): callId: 35 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40298 deadline: 1734371859593, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=99318ad6c4e7b8782230d738424ff705, server=3609ad07831c,39733,1734371789085 2024-12-16T17:56:39,609 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39733 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=99318ad6c4e7b8782230d738424ff705, server=3609ad07831c,39733,1734371789085 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-16T17:56:39,609 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39733 {}] ipc.CallRunner(138): callId: 37 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40284 deadline: 1734371859594, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=99318ad6c4e7b8782230d738424ff705, server=3609ad07831c,39733,1734371789085 2024-12-16T17:56:39,610 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39733 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=99318ad6c4e7b8782230d738424ff705, server=3609ad07831c,39733,1734371789085 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-16T17:56:39,610 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39733 {}] ipc.CallRunner(138): callId: 34 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40322 deadline: 1734371859594, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=99318ad6c4e7b8782230d738424ff705, server=3609ad07831c,39733,1734371789085 2024-12-16T17:56:39,617 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39733 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=99318ad6c4e7b8782230d738424ff705, server=3609ad07831c,39733,1734371789085 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-16T17:56:39,618 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39733 {}] ipc.CallRunner(138): callId: 37 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40334 deadline: 1734371859608, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=99318ad6c4e7b8782230d738424ff705, server=3609ad07831c,39733,1734371789085 2024-12-16T17:56:39,618 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39733 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=99318ad6c4e7b8782230d738424ff705, server=3609ad07831c,39733,1734371789085 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-16T17:56:39,619 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39733 {}] ipc.CallRunner(138): callId: 34 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40296 deadline: 1734371859611, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=99318ad6c4e7b8782230d738424ff705, server=3609ad07831c,39733,1734371789085 2024-12-16T17:56:39,624 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41817 is added to blk_1073741848_1024 (size=12001) 2024-12-16T17:56:39,644 INFO [RS:0;3609ad07831c:39733-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 99318ad6c4e7b8782230d738424ff705#B#compaction#11 average throughput is 0.60 MB/second, slept 0 time(s) and total slept time is 0 ms. 1 active operations remaining, total limit is 50.00 MB/second 2024-12-16T17:56:39,645 INFO [RS:0;3609ad07831c:39733-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 99318ad6c4e7b8782230d738424ff705#A#compaction#10 average throughput is 0.44 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-12-16T17:56:39,645 DEBUG [RS:0;3609ad07831c:39733-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/99318ad6c4e7b8782230d738424ff705/.tmp/B/5644b2037651494ab24f116fdd99d0cc is 50, key is test_row_0/B:col10/1734371798260/Put/seqid=0 2024-12-16T17:56:39,647 DEBUG [RS:0;3609ad07831c:39733-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/99318ad6c4e7b8782230d738424ff705/.tmp/A/737f0e1420f0408594f1fce94c38008e is 50, key is test_row_0/A:col10/1734371798260/Put/seqid=0 2024-12-16T17:56:39,672 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41817 is added to blk_1073741849_1025 (size=12104) 2024-12-16T17:56:39,690 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41817 is added to blk_1073741850_1026 (size=12104) 2024-12-16T17:56:39,695 DEBUG [RS:0;3609ad07831c:39733-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/99318ad6c4e7b8782230d738424ff705/.tmp/B/5644b2037651494ab24f116fdd99d0cc as hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/99318ad6c4e7b8782230d738424ff705/B/5644b2037651494ab24f116fdd99d0cc 2024-12-16T17:56:39,708 DEBUG [RS:0;3609ad07831c:39733-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/99318ad6c4e7b8782230d738424ff705/.tmp/A/737f0e1420f0408594f1fce94c38008e as hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/99318ad6c4e7b8782230d738424ff705/A/737f0e1420f0408594f1fce94c38008e 2024-12-16T17:56:39,714 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39733 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=99318ad6c4e7b8782230d738424ff705, server=3609ad07831c,39733,1734371789085 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-16T17:56:39,714 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39733 {}] ipc.CallRunner(138): callId: 37 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40298 deadline: 1734371859711, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=99318ad6c4e7b8782230d738424ff705, server=3609ad07831c,39733,1734371789085 2024-12-16T17:56:39,715 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39733 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=99318ad6c4e7b8782230d738424ff705, server=3609ad07831c,39733,1734371789085 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-16T17:56:39,715 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39733 {}] ipc.CallRunner(138): callId: 39 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40284 deadline: 1734371859711, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=99318ad6c4e7b8782230d738424ff705, server=3609ad07831c,39733,1734371789085 2024-12-16T17:56:39,716 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39733 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=99318ad6c4e7b8782230d738424ff705, server=3609ad07831c,39733,1734371789085 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-16T17:56:39,716 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39733 {}] ipc.CallRunner(138): callId: 36 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40322 deadline: 1734371859712, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=99318ad6c4e7b8782230d738424ff705, server=3609ad07831c,39733,1734371789085 2024-12-16T17:56:39,722 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39733 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=99318ad6c4e7b8782230d738424ff705, server=3609ad07831c,39733,1734371789085 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-16T17:56:39,722 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39733 {}] ipc.CallRunner(138): callId: 39 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40334 deadline: 1734371859720, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=99318ad6c4e7b8782230d738424ff705, server=3609ad07831c,39733,1734371789085 2024-12-16T17:56:39,723 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39733 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=99318ad6c4e7b8782230d738424ff705, server=3609ad07831c,39733,1734371789085 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-16T17:56:39,723 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39733 {}] ipc.CallRunner(138): callId: 36 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40296 deadline: 1734371859721, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=99318ad6c4e7b8782230d738424ff705, server=3609ad07831c,39733,1734371789085 2024-12-16T17:56:39,728 INFO [RS:0;3609ad07831c:39733-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 99318ad6c4e7b8782230d738424ff705/B of 99318ad6c4e7b8782230d738424ff705 into 5644b2037651494ab24f116fdd99d0cc(size=11.8 K), total size for store is 11.8 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-12-16T17:56:39,728 DEBUG [RS:0;3609ad07831c:39733-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 99318ad6c4e7b8782230d738424ff705: 2024-12-16T17:56:39,728 INFO [RS:0;3609ad07831c:39733-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1734371794518.99318ad6c4e7b8782230d738424ff705., storeName=99318ad6c4e7b8782230d738424ff705/B, priority=13, startTime=1734371799565; duration=0sec 2024-12-16T17:56:39,729 INFO [RS:0;3609ad07831c:39733-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 99318ad6c4e7b8782230d738424ff705/A of 99318ad6c4e7b8782230d738424ff705 into 737f0e1420f0408594f1fce94c38008e(size=11.8 K), total size for store is 11.8 K. This selection was in queue for 0sec, and took 0sec to execute. 
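The repeated WARN/DEBUG pairs above show ipc.CallRunner rejecting Mutate calls because region 99318ad6c4e7b8782230d738424ff705 is over its 512.0 K memstore blocking limit; callers are expected to back off and retry once the flushes and compactions later in this log drain the memstore. Purely as a hedged illustration (this code is not part of this log or of the TestAcidGuarantees test), the Java sketch below shows how an application using the standard HBase 2.x client API might add its own backoff around a put, on top of the retries the client library already performs. The table, row, family and qualifier are copied from the log for flavour; the value and retry parameters are made up.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.util.Bytes;
import java.io.IOException;

public class BusyRegionRetryExample {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    try (Connection conn = ConnectionFactory.createConnection(conf);
         Table table = conn.getTable(TableName.valueOf("TestAcidGuarantees"))) {
      Put put = new Put(Bytes.toBytes("test_row_0"));
      put.addColumn(Bytes.toBytes("A"), Bytes.toBytes("col10"), Bytes.toBytes("value"));
      long backoffMs = 100L;            // made-up starting backoff
      for (int attempt = 1; attempt <= 5; attempt++) {
        try {
          table.put(put);               // may surface RegionTooBusyException (possibly wrapped) while the region is blocked
          break;                        // write accepted
        } catch (IOException e) {
          Thread.sleep(backoffMs);      // give flushes/compactions time to shrink the memstore
          backoffMs *= 2;               // exponential backoff before the next attempt
        }
      }
    }
  }
}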
2024-12-16T17:56:39,729 DEBUG [RS:0;3609ad07831c:39733-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 99318ad6c4e7b8782230d738424ff705: 2024-12-16T17:56:39,729 INFO [RS:0;3609ad07831c:39733-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1734371794518.99318ad6c4e7b8782230d738424ff705., storeName=99318ad6c4e7b8782230d738424ff705/A, priority=13, startTime=1734371799549; duration=0sec 2024-12-16T17:56:39,729 DEBUG [RS:0;3609ad07831c:39733-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-16T17:56:39,729 DEBUG [RS:0;3609ad07831c:39733-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 99318ad6c4e7b8782230d738424ff705:B 2024-12-16T17:56:39,729 DEBUG [RS:0;3609ad07831c:39733-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-16T17:56:39,729 DEBUG [RS:0;3609ad07831c:39733-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 99318ad6c4e7b8782230d738424ff705:A 2024-12-16T17:56:39,729 DEBUG [RS:0;3609ad07831c:39733-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-16T17:56:39,733 DEBUG [RS:0;3609ad07831c:39733-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 36003 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-16T17:56:39,734 DEBUG [RS:0;3609ad07831c:39733-longCompactions-0 {}] regionserver.HStore(1540): 99318ad6c4e7b8782230d738424ff705/C is initiating minor compaction (all files) 2024-12-16T17:56:39,734 INFO [RS:0;3609ad07831c:39733-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 99318ad6c4e7b8782230d738424ff705/C in TestAcidGuarantees,,1734371794518.99318ad6c4e7b8782230d738424ff705. 
2024-12-16T17:56:39,734 INFO [RS:0;3609ad07831c:39733-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/99318ad6c4e7b8782230d738424ff705/C/12d72cbd16064807829fbc7be4c42973, hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/99318ad6c4e7b8782230d738424ff705/C/84751958ddbb492fb1c2096effdef643, hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/99318ad6c4e7b8782230d738424ff705/C/93eb40b7206b4047928b9bfdb2a7024a] into tmpdir=hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/99318ad6c4e7b8782230d738424ff705/.tmp, totalSize=35.2 K 2024-12-16T17:56:39,735 DEBUG [RS:0;3609ad07831c:39733-longCompactions-0 {}] compactions.Compactor(224): Compacting 12d72cbd16064807829fbc7be4c42973, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=14, earliestPutTs=1734371796897 2024-12-16T17:56:39,736 DEBUG [RS:0;3609ad07831c:39733-longCompactions-0 {}] compactions.Compactor(224): Compacting 84751958ddbb492fb1c2096effdef643, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=38, earliestPutTs=1734371797014 2024-12-16T17:56:39,738 DEBUG [RS:0;3609ad07831c:39733-longCompactions-0 {}] compactions.Compactor(224): Compacting 93eb40b7206b4047928b9bfdb2a7024a, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=51, earliestPutTs=1734371798224 2024-12-16T17:56:39,768 INFO [RS:0;3609ad07831c:39733-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 99318ad6c4e7b8782230d738424ff705#C#compaction#12 average throughput is 1.31 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-12-16T17:56:39,768 DEBUG [RS:0;3609ad07831c:39733-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/99318ad6c4e7b8782230d738424ff705/.tmp/C/118c401fe52b44db99ae7d6d6b092616 is 50, key is test_row_0/C:col10/1734371798260/Put/seqid=0 2024-12-16T17:56:39,777 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41817 is added to blk_1073741851_1027 (size=12104) 2024-12-16T17:56:39,794 DEBUG [RS:0;3609ad07831c:39733-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/99318ad6c4e7b8782230d738424ff705/.tmp/C/118c401fe52b44db99ae7d6d6b092616 as hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/99318ad6c4e7b8782230d738424ff705/C/118c401fe52b44db99ae7d6d6b092616 2024-12-16T17:56:39,810 INFO [RS:0;3609ad07831c:39733-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 99318ad6c4e7b8782230d738424ff705/C of 99318ad6c4e7b8782230d738424ff705 into 118c401fe52b44db99ae7d6d6b092616(size=11.8 K), total size for store is 11.8 K. This selection was in queue for 0sec, and took 0sec to execute. 
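For context on the recurring "Over memstore limit=512.0 K" figure: the blocking size checked in HRegion.checkResources is, to the best of my reading, the per-region memstore flush size multiplied by hbase.hregion.memstore.block.multiplier, both taken from configuration. The short Java sketch below only illustrates that arithmetic with the standard Hadoop Configuration API; the 128 KB flush size is an assumption about this test's scaled-down settings, not a value read anywhere in this log.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;

public class MemstoreBlockingLimit {
  public static void main(String[] args) {
    Configuration conf = HBaseConfiguration.create();
    // Assumed test-sized flush size; the production default is 128 MB, not 128 KB.
    conf.setLong("hbase.hregion.memstore.flush.size", 128L * 1024);
    long flushSize = conf.getLong("hbase.hregion.memstore.flush.size", 128L * 1024 * 1024);
    long multiplier = conf.getLong("hbase.hregion.memstore.block.multiplier", 4L);
    long blockingSize = flushSize * multiplier;   // 128 KB * 4 = 512 KB, matching the limit reported in this log
    System.out.println("writes block above roughly " + blockingSize + " bytes per region");
  }
}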
2024-12-16T17:56:39,811 DEBUG [RS:0;3609ad07831c:39733-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 99318ad6c4e7b8782230d738424ff705: 2024-12-16T17:56:39,811 INFO [RS:0;3609ad07831c:39733-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1734371794518.99318ad6c4e7b8782230d738424ff705., storeName=99318ad6c4e7b8782230d738424ff705/C, priority=13, startTime=1734371799567; duration=0sec 2024-12-16T17:56:39,811 DEBUG [RS:0;3609ad07831c:39733-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-16T17:56:39,811 DEBUG [RS:0;3609ad07831c:39733-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 99318ad6c4e7b8782230d738424ff705:C 2024-12-16T17:56:39,919 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39733 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=99318ad6c4e7b8782230d738424ff705, server=3609ad07831c,39733,1734371789085 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-16T17:56:39,920 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39733 {}] ipc.CallRunner(138): callId: 39 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40298 deadline: 1734371859918, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=99318ad6c4e7b8782230d738424ff705, server=3609ad07831c,39733,1734371789085 2024-12-16T17:56:39,920 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39733 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=99318ad6c4e7b8782230d738424ff705, server=3609ad07831c,39733,1734371789085 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-16T17:56:39,920 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39733 {}] ipc.CallRunner(138): callId: 41 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40284 deadline: 1734371859919, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=99318ad6c4e7b8782230d738424ff705, server=3609ad07831c,39733,1734371789085 2024-12-16T17:56:39,924 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39733 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=99318ad6c4e7b8782230d738424ff705, server=3609ad07831c,39733,1734371789085 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-16T17:56:39,924 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39733 {}] ipc.CallRunner(138): callId: 38 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40322 deadline: 1734371859919, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=99318ad6c4e7b8782230d738424ff705, server=3609ad07831c,39733,1734371789085 2024-12-16T17:56:39,928 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39733 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=99318ad6c4e7b8782230d738424ff705, server=3609ad07831c,39733,1734371789085 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-16T17:56:39,928 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39733 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=99318ad6c4e7b8782230d738424ff705, server=3609ad07831c,39733,1734371789085 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-16T17:56:39,929 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39733 {}] ipc.CallRunner(138): callId: 41 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40334 deadline: 1734371859926, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=99318ad6c4e7b8782230d738424ff705, server=3609ad07831c,39733,1734371789085 2024-12-16T17:56:39,929 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39733 {}] ipc.CallRunner(138): callId: 38 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40296 deadline: 1734371859926, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=99318ad6c4e7b8782230d738424ff705, server=3609ad07831c,39733,1734371789085 2024-12-16T17:56:40,026 INFO [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-1 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=44.73 KB at sequenceid=74 (bloomFilter=true), to=hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/99318ad6c4e7b8782230d738424ff705/.tmp/A/52be8eb4aef0458b9c56944ba033f8f7 2024-12-16T17:56:40,049 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-1 {event_type=RS_FLUSH_REGIONS, pid=13}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/99318ad6c4e7b8782230d738424ff705/.tmp/B/08d4b8ae54014556a2449b481e7d4e8f is 50, key is test_row_0/B:col10/1734371798939/Put/seqid=0 2024-12-16T17:56:40,061 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41817 is added to blk_1073741852_1028 (size=12001) 2024-12-16T17:56:40,065 INFO [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-1 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=44.73 KB at sequenceid=74 (bloomFilter=true), to=hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/99318ad6c4e7b8782230d738424ff705/.tmp/B/08d4b8ae54014556a2449b481e7d4e8f 2024-12-16T17:56:40,084 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-1 {event_type=RS_FLUSH_REGIONS, pid=13}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/99318ad6c4e7b8782230d738424ff705/.tmp/C/fa96b4a4d12f4db5a3045a4a74327747 is 50, key is test_row_0/C:col10/1734371798939/Put/seqid=0 2024-12-16T17:56:40,115 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41817 is added to blk_1073741853_1029 (size=12001) 2024-12-16T17:56:40,118 INFO [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-1 {event_type=RS_FLUSH_REGIONS, pid=13}] 
regionserver.DefaultStoreFlusher(81): Flushed memstore data size=44.73 KB at sequenceid=74 (bloomFilter=true), to=hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/99318ad6c4e7b8782230d738424ff705/.tmp/C/fa96b4a4d12f4db5a3045a4a74327747 2024-12-16T17:56:40,135 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-1 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/99318ad6c4e7b8782230d738424ff705/.tmp/A/52be8eb4aef0458b9c56944ba033f8f7 as hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/99318ad6c4e7b8782230d738424ff705/A/52be8eb4aef0458b9c56944ba033f8f7 2024-12-16T17:56:40,152 INFO [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-1 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/99318ad6c4e7b8782230d738424ff705/A/52be8eb4aef0458b9c56944ba033f8f7, entries=150, sequenceid=74, filesize=11.7 K 2024-12-16T17:56:40,155 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-1 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/99318ad6c4e7b8782230d738424ff705/.tmp/B/08d4b8ae54014556a2449b481e7d4e8f as hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/99318ad6c4e7b8782230d738424ff705/B/08d4b8ae54014556a2449b481e7d4e8f 2024-12-16T17:56:40,169 INFO [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-1 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/99318ad6c4e7b8782230d738424ff705/B/08d4b8ae54014556a2449b481e7d4e8f, entries=150, sequenceid=74, filesize=11.7 K 2024-12-16T17:56:40,173 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-1 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/99318ad6c4e7b8782230d738424ff705/.tmp/C/fa96b4a4d12f4db5a3045a4a74327747 as hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/99318ad6c4e7b8782230d738424ff705/C/fa96b4a4d12f4db5a3045a4a74327747 2024-12-16T17:56:40,185 INFO [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-1 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/99318ad6c4e7b8782230d738424ff705/C/fa96b4a4d12f4db5a3045a4a74327747, entries=150, sequenceid=74, filesize=11.7 K 2024-12-16T17:56:40,188 INFO [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-1 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.HRegion(3040): Finished flush of dataSize ~134.18 KB/137400, heapSize ~352.27 KB/360720, currentSize=73.80 KB/75570 for 99318ad6c4e7b8782230d738424ff705 in 619ms, sequenceid=74, compaction requested=false 2024-12-16T17:56:40,189 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-1 
{event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.HRegion(2538): Flush status journal for 99318ad6c4e7b8782230d738424ff705: 2024-12-16T17:56:40,189 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-1 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1734371794518.99318ad6c4e7b8782230d738424ff705. 2024-12-16T17:56:40,189 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-1 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=13 2024-12-16T17:56:40,189 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38367 {}] master.HMaster(4106): Remote procedure done, pid=13 2024-12-16T17:56:40,196 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=13, resume processing ppid=12 2024-12-16T17:56:40,196 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=13, ppid=12, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 3.3230 sec 2024-12-16T17:56:40,201 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=12, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=12, table=TestAcidGuarantees in 3.3390 sec 2024-12-16T17:56:40,229 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 99318ad6c4e7b8782230d738424ff705 3/3 column families, dataSize=87.22 KB heapSize=229.27 KB 2024-12-16T17:56:40,229 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39733 {}] regionserver.HRegion(8581): Flush requested on 99318ad6c4e7b8782230d738424ff705 2024-12-16T17:56:40,229 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 99318ad6c4e7b8782230d738424ff705, store=A 2024-12-16T17:56:40,229 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-16T17:56:40,229 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 99318ad6c4e7b8782230d738424ff705, store=B 2024-12-16T17:56:40,229 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-16T17:56:40,230 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 99318ad6c4e7b8782230d738424ff705, store=C 2024-12-16T17:56:40,230 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-16T17:56:40,244 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/99318ad6c4e7b8782230d738424ff705/.tmp/A/540d85c4a8504848a354eea6445834f2 is 50, key is test_row_0/A:col10/1734371800226/Put/seqid=0 2024-12-16T17:56:40,257 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41817 is added to blk_1073741854_1030 (size=12001) 2024-12-16T17:56:40,259 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=29.07 KB at sequenceid=93 (bloomFilter=true), to=hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/99318ad6c4e7b8782230d738424ff705/.tmp/A/540d85c4a8504848a354eea6445834f2 2024-12-16T17:56:40,273 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39733 {}] regionserver.HRegion(5069): Region is too busy due to 
exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=99318ad6c4e7b8782230d738424ff705, server=3609ad07831c,39733,1734371789085 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-16T17:56:40,273 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39733 {}] ipc.CallRunner(138): callId: 47 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40284 deadline: 1734371860266, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=99318ad6c4e7b8782230d738424ff705, server=3609ad07831c,39733,1734371789085 2024-12-16T17:56:40,274 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39733 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=99318ad6c4e7b8782230d738424ff705, server=3609ad07831c,39733,1734371789085 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-16T17:56:40,274 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39733 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=99318ad6c4e7b8782230d738424ff705, server=3609ad07831c,39733,1734371789085 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-16T17:56:40,275 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39733 {}] ipc.CallRunner(138): callId: 44 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40322 deadline: 1734371860269, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=99318ad6c4e7b8782230d738424ff705, server=3609ad07831c,39733,1734371789085 2024-12-16T17:56:40,275 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39733 {}] ipc.CallRunner(138): callId: 43 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40296 deadline: 1734371860267, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=99318ad6c4e7b8782230d738424ff705, server=3609ad07831c,39733,1734371789085 2024-12-16T17:56:40,275 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39733 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=99318ad6c4e7b8782230d738424ff705, server=3609ad07831c,39733,1734371789085 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-16T17:56:40,276 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39733 {}] ipc.CallRunner(138): callId: 46 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40334 deadline: 1734371860270, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=99318ad6c4e7b8782230d738424ff705, server=3609ad07831c,39733,1734371789085 2024-12-16T17:56:40,280 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/99318ad6c4e7b8782230d738424ff705/.tmp/B/733a372abe194589853feda1be9c6525 is 50, key is test_row_0/B:col10/1734371800226/Put/seqid=0 2024-12-16T17:56:40,280 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39733 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=99318ad6c4e7b8782230d738424ff705, server=3609ad07831c,39733,1734371789085 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-16T17:56:40,281 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39733 {}] ipc.CallRunner(138): callId: 46 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40298 deadline: 1734371860273, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=99318ad6c4e7b8782230d738424ff705, server=3609ad07831c,39733,1734371789085 2024-12-16T17:56:40,307 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41817 is added to blk_1073741855_1031 (size=12001) 2024-12-16T17:56:40,310 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=29.07 KB at sequenceid=93 (bloomFilter=true), to=hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/99318ad6c4e7b8782230d738424ff705/.tmp/B/733a372abe194589853feda1be9c6525 2024-12-16T17:56:40,329 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/99318ad6c4e7b8782230d738424ff705/.tmp/C/8492014779154d899b20b0cf42ff3ca0 is 50, key is test_row_0/C:col10/1734371800226/Put/seqid=0 2024-12-16T17:56:40,350 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41817 is added to blk_1073741856_1032 (size=12001) 2024-12-16T17:56:40,352 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=29.07 KB at sequenceid=93 (bloomFilter=true), to=hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/99318ad6c4e7b8782230d738424ff705/.tmp/C/8492014779154d899b20b0cf42ff3ca0 2024-12-16T17:56:40,362 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/99318ad6c4e7b8782230d738424ff705/.tmp/A/540d85c4a8504848a354eea6445834f2 as hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/99318ad6c4e7b8782230d738424ff705/A/540d85c4a8504848a354eea6445834f2 2024-12-16T17:56:40,374 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/99318ad6c4e7b8782230d738424ff705/A/540d85c4a8504848a354eea6445834f2, entries=150, sequenceid=93, filesize=11.7 K 2024-12-16T17:56:40,376 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/99318ad6c4e7b8782230d738424ff705/.tmp/B/733a372abe194589853feda1be9c6525 as 
hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/99318ad6c4e7b8782230d738424ff705/B/733a372abe194589853feda1be9c6525 2024-12-16T17:56:40,381 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39733 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=99318ad6c4e7b8782230d738424ff705, server=3609ad07831c,39733,1734371789085 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-16T17:56:40,381 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39733 {}] ipc.CallRunner(138): callId: 49 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40284 deadline: 1734371860376, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=99318ad6c4e7b8782230d738424ff705, server=3609ad07831c,39733,1734371789085 2024-12-16T17:56:40,398 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39733 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=99318ad6c4e7b8782230d738424ff705, server=3609ad07831c,39733,1734371789085 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-16T17:56:40,398 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39733 {}] ipc.CallRunner(138): callId: 46 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40322 deadline: 1734371860377, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=99318ad6c4e7b8782230d738424ff705, server=3609ad07831c,39733,1734371789085 2024-12-16T17:56:40,398 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39733 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=99318ad6c4e7b8782230d738424ff705, server=3609ad07831c,39733,1734371789085 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-16T17:56:40,398 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39733 {}] ipc.CallRunner(138): callId: 45 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40296 deadline: 1734371860377, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=99318ad6c4e7b8782230d738424ff705, server=3609ad07831c,39733,1734371789085 2024-12-16T17:56:40,398 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39733 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=99318ad6c4e7b8782230d738424ff705, server=3609ad07831c,39733,1734371789085 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-16T17:56:40,399 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39733 {}] ipc.CallRunner(138): callId: 48 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40334 deadline: 1734371860378, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=99318ad6c4e7b8782230d738424ff705, server=3609ad07831c,39733,1734371789085 2024-12-16T17:56:40,404 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39733 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=99318ad6c4e7b8782230d738424ff705, server=3609ad07831c,39733,1734371789085 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-16T17:56:40,404 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39733 {}] ipc.CallRunner(138): callId: 48 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40298 deadline: 1734371860400, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=99318ad6c4e7b8782230d738424ff705, server=3609ad07831c,39733,1734371789085 2024-12-16T17:56:40,405 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/99318ad6c4e7b8782230d738424ff705/B/733a372abe194589853feda1be9c6525, entries=150, sequenceid=93, filesize=11.7 K 2024-12-16T17:56:40,407 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/99318ad6c4e7b8782230d738424ff705/.tmp/C/8492014779154d899b20b0cf42ff3ca0 as hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/99318ad6c4e7b8782230d738424ff705/C/8492014779154d899b20b0cf42ff3ca0 2024-12-16T17:56:40,419 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/99318ad6c4e7b8782230d738424ff705/C/8492014779154d899b20b0cf42ff3ca0, entries=150, sequenceid=93, filesize=11.7 K 2024-12-16T17:56:40,420 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~87.22 KB/89310, heapSize ~229.22 KB/234720, currentSize=114.05 KB/116790 for 99318ad6c4e7b8782230d738424ff705 in 192ms, sequenceid=93, compaction requested=true 2024-12-16T17:56:40,420 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 99318ad6c4e7b8782230d738424ff705: 2024-12-16T17:56:40,421 DEBUG [RS:0;3609ad07831c:39733-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-16T17:56:40,423 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 99318ad6c4e7b8782230d738424ff705:A, priority=-2147483648, current under compaction store size is 1 2024-12-16T17:56:40,423 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-16T17:56:40,423 DEBUG [RS:0;3609ad07831c:39733-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 36106 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-16T17:56:40,423 DEBUG [RS:0;3609ad07831c:39733-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 
compacting, 3 eligible, 16 blocking 2024-12-16T17:56:40,423 DEBUG [RS:0;3609ad07831c:39733-shortCompactions-0 {}] regionserver.HStore(1540): 99318ad6c4e7b8782230d738424ff705/A is initiating minor compaction (all files) 2024-12-16T17:56:40,423 INFO [RS:0;3609ad07831c:39733-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 99318ad6c4e7b8782230d738424ff705/A in TestAcidGuarantees,,1734371794518.99318ad6c4e7b8782230d738424ff705. 2024-12-16T17:56:40,424 INFO [RS:0;3609ad07831c:39733-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/99318ad6c4e7b8782230d738424ff705/A/737f0e1420f0408594f1fce94c38008e, hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/99318ad6c4e7b8782230d738424ff705/A/52be8eb4aef0458b9c56944ba033f8f7, hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/99318ad6c4e7b8782230d738424ff705/A/540d85c4a8504848a354eea6445834f2] into tmpdir=hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/99318ad6c4e7b8782230d738424ff705/.tmp, totalSize=35.3 K 2024-12-16T17:56:40,424 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 99318ad6c4e7b8782230d738424ff705:B, priority=-2147483648, current under compaction store size is 2 2024-12-16T17:56:40,424 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-16T17:56:40,424 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 99318ad6c4e7b8782230d738424ff705:C, priority=-2147483648, current under compaction store size is 3 2024-12-16T17:56:40,424 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-16T17:56:40,425 DEBUG [RS:0;3609ad07831c:39733-shortCompactions-0 {}] compactions.Compactor(224): Compacting 737f0e1420f0408594f1fce94c38008e, keycount=150, bloomtype=ROW, size=11.8 K, encoding=NONE, compression=NONE, seqNum=51, earliestPutTs=1734371798224 2024-12-16T17:56:40,425 DEBUG [RS:0;3609ad07831c:39733-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 36106 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-16T17:56:40,426 DEBUG [RS:0;3609ad07831c:39733-longCompactions-0 {}] regionserver.HStore(1540): 99318ad6c4e7b8782230d738424ff705/B is initiating minor compaction (all files) 2024-12-16T17:56:40,426 INFO [RS:0;3609ad07831c:39733-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 99318ad6c4e7b8782230d738424ff705/B in TestAcidGuarantees,,1734371794518.99318ad6c4e7b8782230d738424ff705. 
2024-12-16T17:56:40,426 INFO [RS:0;3609ad07831c:39733-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/99318ad6c4e7b8782230d738424ff705/B/5644b2037651494ab24f116fdd99d0cc, hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/99318ad6c4e7b8782230d738424ff705/B/08d4b8ae54014556a2449b481e7d4e8f, hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/99318ad6c4e7b8782230d738424ff705/B/733a372abe194589853feda1be9c6525] into tmpdir=hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/99318ad6c4e7b8782230d738424ff705/.tmp, totalSize=35.3 K 2024-12-16T17:56:40,427 DEBUG [RS:0;3609ad07831c:39733-longCompactions-0 {}] compactions.Compactor(224): Compacting 5644b2037651494ab24f116fdd99d0cc, keycount=150, bloomtype=ROW, size=11.8 K, encoding=NONE, compression=NONE, seqNum=51, earliestPutTs=1734371798224 2024-12-16T17:56:40,427 DEBUG [RS:0;3609ad07831c:39733-shortCompactions-0 {}] compactions.Compactor(224): Compacting 52be8eb4aef0458b9c56944ba033f8f7, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=74, earliestPutTs=1734371798939 2024-12-16T17:56:40,428 DEBUG [RS:0;3609ad07831c:39733-longCompactions-0 {}] compactions.Compactor(224): Compacting 08d4b8ae54014556a2449b481e7d4e8f, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=74, earliestPutTs=1734371798939 2024-12-16T17:56:40,429 DEBUG [RS:0;3609ad07831c:39733-shortCompactions-0 {}] compactions.Compactor(224): Compacting 540d85c4a8504848a354eea6445834f2, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=93, earliestPutTs=1734371799606 2024-12-16T17:56:40,429 DEBUG [RS:0;3609ad07831c:39733-longCompactions-0 {}] compactions.Compactor(224): Compacting 733a372abe194589853feda1be9c6525, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=93, earliestPutTs=1734371799606 2024-12-16T17:56:40,456 INFO [RS:0;3609ad07831c:39733-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 99318ad6c4e7b8782230d738424ff705#B#compaction#18 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-12-16T17:56:40,457 DEBUG [RS:0;3609ad07831c:39733-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/99318ad6c4e7b8782230d738424ff705/.tmp/B/87ad12d6fa474caa9e3cb9fb28257fb6 is 50, key is test_row_0/B:col10/1734371800226/Put/seqid=0 2024-12-16T17:56:40,466 INFO [RS:0;3609ad07831c:39733-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 99318ad6c4e7b8782230d738424ff705#A#compaction#19 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-12-16T17:56:40,467 DEBUG [RS:0;3609ad07831c:39733-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/99318ad6c4e7b8782230d738424ff705/.tmp/A/48ad14bc92fe4d76800e031cc7bd4e3a is 50, key is test_row_0/A:col10/1734371800226/Put/seqid=0 2024-12-16T17:56:40,483 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41817 is added to blk_1073741857_1033 (size=12207) 2024-12-16T17:56:40,505 DEBUG [RS:0;3609ad07831c:39733-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/99318ad6c4e7b8782230d738424ff705/.tmp/B/87ad12d6fa474caa9e3cb9fb28257fb6 as hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/99318ad6c4e7b8782230d738424ff705/B/87ad12d6fa474caa9e3cb9fb28257fb6 2024-12-16T17:56:40,514 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41817 is added to blk_1073741858_1034 (size=12207) 2024-12-16T17:56:40,529 INFO [RS:0;3609ad07831c:39733-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 99318ad6c4e7b8782230d738424ff705/B of 99318ad6c4e7b8782230d738424ff705 into 87ad12d6fa474caa9e3cb9fb28257fb6(size=11.9 K), total size for store is 11.9 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-12-16T17:56:40,529 DEBUG [RS:0;3609ad07831c:39733-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 99318ad6c4e7b8782230d738424ff705: 2024-12-16T17:56:40,529 DEBUG [RS:0;3609ad07831c:39733-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/99318ad6c4e7b8782230d738424ff705/.tmp/A/48ad14bc92fe4d76800e031cc7bd4e3a as hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/99318ad6c4e7b8782230d738424ff705/A/48ad14bc92fe4d76800e031cc7bd4e3a 2024-12-16T17:56:40,529 INFO [RS:0;3609ad07831c:39733-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1734371794518.99318ad6c4e7b8782230d738424ff705., storeName=99318ad6c4e7b8782230d738424ff705/B, priority=13, startTime=1734371800423; duration=0sec 2024-12-16T17:56:40,529 DEBUG [RS:0;3609ad07831c:39733-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-16T17:56:40,529 DEBUG [RS:0;3609ad07831c:39733-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 99318ad6c4e7b8782230d738424ff705:B 2024-12-16T17:56:40,529 DEBUG [RS:0;3609ad07831c:39733-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-16T17:56:40,531 DEBUG [RS:0;3609ad07831c:39733-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 36106 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-16T17:56:40,531 DEBUG 
[RS:0;3609ad07831c:39733-longCompactions-0 {}] regionserver.HStore(1540): 99318ad6c4e7b8782230d738424ff705/C is initiating minor compaction (all files) 2024-12-16T17:56:40,531 INFO [RS:0;3609ad07831c:39733-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 99318ad6c4e7b8782230d738424ff705/C in TestAcidGuarantees,,1734371794518.99318ad6c4e7b8782230d738424ff705. 2024-12-16T17:56:40,531 INFO [RS:0;3609ad07831c:39733-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/99318ad6c4e7b8782230d738424ff705/C/118c401fe52b44db99ae7d6d6b092616, hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/99318ad6c4e7b8782230d738424ff705/C/fa96b4a4d12f4db5a3045a4a74327747, hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/99318ad6c4e7b8782230d738424ff705/C/8492014779154d899b20b0cf42ff3ca0] into tmpdir=hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/99318ad6c4e7b8782230d738424ff705/.tmp, totalSize=35.3 K 2024-12-16T17:56:40,533 DEBUG [RS:0;3609ad07831c:39733-longCompactions-0 {}] compactions.Compactor(224): Compacting 118c401fe52b44db99ae7d6d6b092616, keycount=150, bloomtype=ROW, size=11.8 K, encoding=NONE, compression=NONE, seqNum=51, earliestPutTs=1734371798224 2024-12-16T17:56:40,536 DEBUG [RS:0;3609ad07831c:39733-longCompactions-0 {}] compactions.Compactor(224): Compacting fa96b4a4d12f4db5a3045a4a74327747, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=74, earliestPutTs=1734371798939 2024-12-16T17:56:40,538 DEBUG [RS:0;3609ad07831c:39733-longCompactions-0 {}] compactions.Compactor(224): Compacting 8492014779154d899b20b0cf42ff3ca0, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=93, earliestPutTs=1734371799606 2024-12-16T17:56:40,541 INFO [RS:0;3609ad07831c:39733-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 99318ad6c4e7b8782230d738424ff705/A of 99318ad6c4e7b8782230d738424ff705 into 48ad14bc92fe4d76800e031cc7bd4e3a(size=11.9 K), total size for store is 11.9 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-12-16T17:56:40,541 DEBUG [RS:0;3609ad07831c:39733-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 99318ad6c4e7b8782230d738424ff705: 2024-12-16T17:56:40,541 INFO [RS:0;3609ad07831c:39733-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1734371794518.99318ad6c4e7b8782230d738424ff705., storeName=99318ad6c4e7b8782230d738424ff705/A, priority=13, startTime=1734371800421; duration=0sec 2024-12-16T17:56:40,541 DEBUG [RS:0;3609ad07831c:39733-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-16T17:56:40,541 DEBUG [RS:0;3609ad07831c:39733-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 99318ad6c4e7b8782230d738424ff705:A 2024-12-16T17:56:40,566 INFO [RS:0;3609ad07831c:39733-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 99318ad6c4e7b8782230d738424ff705#C#compaction#20 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-12-16T17:56:40,567 DEBUG [RS:0;3609ad07831c:39733-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/99318ad6c4e7b8782230d738424ff705/.tmp/C/5f05729b96e54a61b2441059c8fb456e is 50, key is test_row_0/C:col10/1734371800226/Put/seqid=0 2024-12-16T17:56:40,593 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41817 is added to blk_1073741859_1035 (size=12207) 2024-12-16T17:56:40,604 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 99318ad6c4e7b8782230d738424ff705 3/3 column families, dataSize=120.76 KB heapSize=317.16 KB 2024-12-16T17:56:40,605 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39733 {}] regionserver.HRegion(8581): Flush requested on 99318ad6c4e7b8782230d738424ff705 2024-12-16T17:56:40,606 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 99318ad6c4e7b8782230d738424ff705, store=A 2024-12-16T17:56:40,607 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-16T17:56:40,607 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 99318ad6c4e7b8782230d738424ff705, store=B 2024-12-16T17:56:40,607 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-16T17:56:40,607 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 99318ad6c4e7b8782230d738424ff705, store=C 2024-12-16T17:56:40,607 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-16T17:56:40,608 DEBUG [RS:0;3609ad07831c:39733-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/99318ad6c4e7b8782230d738424ff705/.tmp/C/5f05729b96e54a61b2441059c8fb456e as hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/99318ad6c4e7b8782230d738424ff705/C/5f05729b96e54a61b2441059c8fb456e 2024-12-16T17:56:40,616 DEBUG [MemStoreFlusher.0 
{}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/99318ad6c4e7b8782230d738424ff705/.tmp/A/d0b4ee1360ac4a698c6bdc3535dcdd36 is 50, key is test_row_0/A:col10/1734371800270/Put/seqid=0 2024-12-16T17:56:40,627 INFO [RS:0;3609ad07831c:39733-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 99318ad6c4e7b8782230d738424ff705/C of 99318ad6c4e7b8782230d738424ff705 into 5f05729b96e54a61b2441059c8fb456e(size=11.9 K), total size for store is 11.9 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-12-16T17:56:40,627 DEBUG [RS:0;3609ad07831c:39733-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 99318ad6c4e7b8782230d738424ff705: 2024-12-16T17:56:40,627 INFO [RS:0;3609ad07831c:39733-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1734371794518.99318ad6c4e7b8782230d738424ff705., storeName=99318ad6c4e7b8782230d738424ff705/C, priority=13, startTime=1734371800424; duration=0sec 2024-12-16T17:56:40,627 DEBUG [RS:0;3609ad07831c:39733-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-16T17:56:40,627 DEBUG [RS:0;3609ad07831c:39733-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 99318ad6c4e7b8782230d738424ff705:C 2024-12-16T17:56:40,630 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39733 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=99318ad6c4e7b8782230d738424ff705, server=3609ad07831c,39733,1734371789085 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-16T17:56:40,630 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39733 {}] ipc.CallRunner(138): callId: 52 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40334 deadline: 1734371860621, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=99318ad6c4e7b8782230d738424ff705, server=3609ad07831c,39733,1734371789085 2024-12-16T17:56:40,631 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39733 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=99318ad6c4e7b8782230d738424ff705, server=3609ad07831c,39733,1734371789085 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-16T17:56:40,631 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39733 {}] ipc.CallRunner(138): callId: 52 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40298 deadline: 1734371860625, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=99318ad6c4e7b8782230d738424ff705, server=3609ad07831c,39733,1734371789085 2024-12-16T17:56:40,632 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39733 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=99318ad6c4e7b8782230d738424ff705, server=3609ad07831c,39733,1734371789085 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-16T17:56:40,632 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39733 {}] ipc.CallRunner(138): callId: 54 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40284 deadline: 1734371860623, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=99318ad6c4e7b8782230d738424ff705, server=3609ad07831c,39733,1734371789085 2024-12-16T17:56:40,633 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39733 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=99318ad6c4e7b8782230d738424ff705, server=3609ad07831c,39733,1734371789085 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-16T17:56:40,633 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39733 {}] ipc.CallRunner(138): callId: 51 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40322 deadline: 1734371860629, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=99318ad6c4e7b8782230d738424ff705, server=3609ad07831c,39733,1734371789085 2024-12-16T17:56:40,633 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39733 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=99318ad6c4e7b8782230d738424ff705, server=3609ad07831c,39733,1734371789085 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-16T17:56:40,634 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39733 {}] ipc.CallRunner(138): callId: 50 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40296 deadline: 1734371860630, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=99318ad6c4e7b8782230d738424ff705, server=3609ad07831c,39733,1734371789085 2024-12-16T17:56:40,642 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41817 is added to blk_1073741860_1036 (size=12001) 2024-12-16T17:56:40,646 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=42.49 KB at sequenceid=117 (bloomFilter=true), to=hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/99318ad6c4e7b8782230d738424ff705/.tmp/A/d0b4ee1360ac4a698c6bdc3535dcdd36 2024-12-16T17:56:40,673 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/99318ad6c4e7b8782230d738424ff705/.tmp/B/13c44e4d2dc242dc9b01ea664127fc43 is 50, key is test_row_0/B:col10/1734371800270/Put/seqid=0 2024-12-16T17:56:40,690 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41817 is added to blk_1073741861_1037 (size=12001) 2024-12-16T17:56:40,690 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=42.49 KB at sequenceid=117 (bloomFilter=true), to=hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/99318ad6c4e7b8782230d738424ff705/.tmp/B/13c44e4d2dc242dc9b01ea664127fc43 2024-12-16T17:56:40,706 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/99318ad6c4e7b8782230d738424ff705/.tmp/C/1d755e8160be4b84928ba95be813cf7a is 50, key is test_row_0/C:col10/1734371800270/Put/seqid=0 2024-12-16T17:56:40,732 INFO [Block report processor {}] 
blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41817 is added to blk_1073741862_1038 (size=12001) 2024-12-16T17:56:40,735 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=42.49 KB at sequenceid=117 (bloomFilter=true), to=hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/99318ad6c4e7b8782230d738424ff705/.tmp/C/1d755e8160be4b84928ba95be813cf7a 2024-12-16T17:56:40,735 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39733 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=99318ad6c4e7b8782230d738424ff705, server=3609ad07831c,39733,1734371789085 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-16T17:56:40,736 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39733 {}] ipc.CallRunner(138): callId: 54 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40334 deadline: 1734371860733, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=99318ad6c4e7b8782230d738424ff705, server=3609ad07831c,39733,1734371789085 2024-12-16T17:56:40,736 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39733 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=99318ad6c4e7b8782230d738424ff705, server=3609ad07831c,39733,1734371789085 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-16T17:56:40,737 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39733 {}] ipc.CallRunner(138): callId: 54 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40298 deadline: 1734371860734, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=99318ad6c4e7b8782230d738424ff705, server=3609ad07831c,39733,1734371789085 2024-12-16T17:56:40,737 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39733 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=99318ad6c4e7b8782230d738424ff705, server=3609ad07831c,39733,1734371789085 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-16T17:56:40,737 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39733 {}] ipc.CallRunner(138): callId: 56 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40284 deadline: 1734371860735, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=99318ad6c4e7b8782230d738424ff705, server=3609ad07831c,39733,1734371789085 2024-12-16T17:56:40,738 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39733 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=99318ad6c4e7b8782230d738424ff705, server=3609ad07831c,39733,1734371789085 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-16T17:56:40,738 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39733 {}] ipc.CallRunner(138): callId: 52 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40296 deadline: 1734371860735, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=99318ad6c4e7b8782230d738424ff705, server=3609ad07831c,39733,1734371789085 2024-12-16T17:56:40,738 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39733 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=99318ad6c4e7b8782230d738424ff705, server=3609ad07831c,39733,1734371789085 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-16T17:56:40,739 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39733 {}] ipc.CallRunner(138): callId: 53 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40322 deadline: 1734371860736, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=99318ad6c4e7b8782230d738424ff705, server=3609ad07831c,39733,1734371789085 2024-12-16T17:56:40,750 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/99318ad6c4e7b8782230d738424ff705/.tmp/A/d0b4ee1360ac4a698c6bdc3535dcdd36 as hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/99318ad6c4e7b8782230d738424ff705/A/d0b4ee1360ac4a698c6bdc3535dcdd36 2024-12-16T17:56:40,765 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/99318ad6c4e7b8782230d738424ff705/A/d0b4ee1360ac4a698c6bdc3535dcdd36, entries=150, sequenceid=117, filesize=11.7 K 2024-12-16T17:56:40,767 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/99318ad6c4e7b8782230d738424ff705/.tmp/B/13c44e4d2dc242dc9b01ea664127fc43 as hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/99318ad6c4e7b8782230d738424ff705/B/13c44e4d2dc242dc9b01ea664127fc43 2024-12-16T17:56:40,784 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/99318ad6c4e7b8782230d738424ff705/B/13c44e4d2dc242dc9b01ea664127fc43, entries=150, sequenceid=117, filesize=11.7 K 2024-12-16T17:56:40,788 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/99318ad6c4e7b8782230d738424ff705/.tmp/C/1d755e8160be4b84928ba95be813cf7a as hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/99318ad6c4e7b8782230d738424ff705/C/1d755e8160be4b84928ba95be813cf7a 2024-12-16T17:56:40,802 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/99318ad6c4e7b8782230d738424ff705/C/1d755e8160be4b84928ba95be813cf7a, entries=150, sequenceid=117, filesize=11.7 K 2024-12-16T17:56:40,804 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~127.47 KB/130530, heapSize ~334.69 KB/342720, currentSize=73.80 
KB/75570 for 99318ad6c4e7b8782230d738424ff705 in 199ms, sequenceid=117, compaction requested=false 2024-12-16T17:56:40,804 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 99318ad6c4e7b8782230d738424ff705: 2024-12-16T17:56:40,942 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39733 {}] regionserver.HRegion(8581): Flush requested on 99318ad6c4e7b8782230d738424ff705 2024-12-16T17:56:40,942 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 99318ad6c4e7b8782230d738424ff705 3/3 column families, dataSize=80.51 KB heapSize=211.69 KB 2024-12-16T17:56:40,942 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 99318ad6c4e7b8782230d738424ff705, store=A 2024-12-16T17:56:40,942 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-16T17:56:40,942 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 99318ad6c4e7b8782230d738424ff705, store=B 2024-12-16T17:56:40,943 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-16T17:56:40,943 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 99318ad6c4e7b8782230d738424ff705, store=C 2024-12-16T17:56:40,943 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-16T17:56:40,950 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/99318ad6c4e7b8782230d738424ff705/.tmp/A/d8a6dda673a1421d8ee759326067306f is 50, key is test_row_0/A:col10/1734371800614/Put/seqid=0 2024-12-16T17:56:40,972 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41817 is added to blk_1073741863_1039 (size=12101) 2024-12-16T17:56:40,973 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39733 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=99318ad6c4e7b8782230d738424ff705, server=3609ad07831c,39733,1734371789085 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-16T17:56:40,974 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39733 {}] ipc.CallRunner(138): callId: 59 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40298 deadline: 1734371860964, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=99318ad6c4e7b8782230d738424ff705, server=3609ad07831c,39733,1734371789085 2024-12-16T17:56:40,974 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39733 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=99318ad6c4e7b8782230d738424ff705, server=3609ad07831c,39733,1734371789085 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-16T17:56:40,974 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39733 {}] ipc.CallRunner(138): callId: 62 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40284 deadline: 1734371860969, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=99318ad6c4e7b8782230d738424ff705, server=3609ad07831c,39733,1734371789085 2024-12-16T17:56:40,975 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39733 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=99318ad6c4e7b8782230d738424ff705, server=3609ad07831c,39733,1734371789085 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-16T17:56:40,976 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39733 {}] ipc.CallRunner(138): callId: 60 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40334 deadline: 1734371860970, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=99318ad6c4e7b8782230d738424ff705, server=3609ad07831c,39733,1734371789085 2024-12-16T17:56:40,976 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39733 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=99318ad6c4e7b8782230d738424ff705, server=3609ad07831c,39733,1734371789085 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-16T17:56:40,976 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39733 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=99318ad6c4e7b8782230d738424ff705, server=3609ad07831c,39733,1734371789085 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-16T17:56:40,976 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39733 {}] ipc.CallRunner(138): callId: 58 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40296 deadline: 1734371860971, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=99318ad6c4e7b8782230d738424ff705, server=3609ad07831c,39733,1734371789085 2024-12-16T17:56:40,977 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39733 {}] ipc.CallRunner(138): callId: 59 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40322 deadline: 1734371860974, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=99318ad6c4e7b8782230d738424ff705, server=3609ad07831c,39733,1734371789085 2024-12-16T17:56:40,977 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38367 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=12 2024-12-16T17:56:40,977 INFO [Thread-159 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 12 completed 2024-12-16T17:56:40,980 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38367 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-12-16T17:56:40,983 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38367 {}] procedure2.ProcedureExecutor(1098): Stored pid=14, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=14, table=TestAcidGuarantees 2024-12-16T17:56:40,984 INFO [PEWorker-1 {}] procedure.FlushTableProcedure(91): pid=14, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=14, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-12-16T17:56:40,985 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38367 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=14 2024-12-16T17:56:40,985 INFO [PEWorker-1 {}] procedure.FlushTableProcedure(91): pid=14, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=14, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-12-16T17:56:40,986 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=15, ppid=14, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-12-16T17:56:41,077 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39733 {}] 
regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=99318ad6c4e7b8782230d738424ff705, server=3609ad07831c,39733,1734371789085 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-16T17:56:41,078 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39733 {}] ipc.CallRunner(138): callId: 61 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40298 deadline: 1734371861075, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=99318ad6c4e7b8782230d738424ff705, server=3609ad07831c,39733,1734371789085 2024-12-16T17:56:41,078 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39733 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=99318ad6c4e7b8782230d738424ff705, server=3609ad07831c,39733,1734371789085 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-16T17:56:41,079 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39733 {}] ipc.CallRunner(138): callId: 64 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40284 deadline: 1734371861076, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=99318ad6c4e7b8782230d738424ff705, server=3609ad07831c,39733,1734371789085 2024-12-16T17:56:41,079 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39733 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=99318ad6c4e7b8782230d738424ff705, server=3609ad07831c,39733,1734371789085 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-16T17:56:41,079 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39733 {}] ipc.CallRunner(138): callId: 62 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40334 deadline: 1734371861078, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=99318ad6c4e7b8782230d738424ff705, server=3609ad07831c,39733,1734371789085 2024-12-16T17:56:41,079 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39733 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=99318ad6c4e7b8782230d738424ff705, server=3609ad07831c,39733,1734371789085 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-16T17:56:41,079 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39733 {}] ipc.CallRunner(138): callId: 61 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40322 deadline: 1734371861079, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=99318ad6c4e7b8782230d738424ff705, server=3609ad07831c,39733,1734371789085 2024-12-16T17:56:41,081 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39733 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=99318ad6c4e7b8782230d738424ff705, server=3609ad07831c,39733,1734371789085 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-16T17:56:41,081 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39733 {}] ipc.CallRunner(138): callId: 60 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40296 deadline: 1734371861080, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=99318ad6c4e7b8782230d738424ff705, server=3609ad07831c,39733,1734371789085 2024-12-16T17:56:41,086 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38367 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=14 2024-12-16T17:56:41,139 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 3609ad07831c,39733,1734371789085 2024-12-16T17:56:41,140 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=39733 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=15 2024-12-16T17:56:41,140 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-2 {event_type=RS_FLUSH_REGIONS, pid=15}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1734371794518.99318ad6c4e7b8782230d738424ff705. 2024-12-16T17:56:41,140 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-2 {event_type=RS_FLUSH_REGIONS, pid=15}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1734371794518.99318ad6c4e7b8782230d738424ff705. as already flushing 2024-12-16T17:56:41,140 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-2 {event_type=RS_FLUSH_REGIONS, pid=15}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1734371794518.99318ad6c4e7b8782230d738424ff705. 2024-12-16T17:56:41,141 ERROR [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-2 {event_type=RS_FLUSH_REGIONS, pid=15}] handler.RSProcedureHandler(58): pid=15 java.io.IOException: Unable to complete flush {ENCODED => 99318ad6c4e7b8782230d738424ff705, NAME => 'TestAcidGuarantees,,1734371794518.99318ad6c4e7b8782230d738424ff705.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-16T17:56:41,141 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-2 {event_type=RS_FLUSH_REGIONS, pid=15}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=15 java.io.IOException: Unable to complete flush {ENCODED => 99318ad6c4e7b8782230d738424ff705, NAME => 'TestAcidGuarantees,,1734371794518.99318ad6c4e7b8782230d738424ff705.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-16T17:56:41,141 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38367 {}] master.HMaster(4114): Remote procedure failed, pid=15 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 99318ad6c4e7b8782230d738424ff705, NAME => 'TestAcidGuarantees,,1734371794518.99318ad6c4e7b8782230d738424ff705.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 99318ad6c4e7b8782230d738424ff705, NAME => 'TestAcidGuarantees,,1734371794518.99318ad6c4e7b8782230d738424ff705.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-16T17:56:41,284 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39733 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=99318ad6c4e7b8782230d738424ff705, server=3609ad07831c,39733,1734371789085 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-16T17:56:41,285 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39733 {}] ipc.CallRunner(138): callId: 63 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40298 deadline: 1734371861283, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=99318ad6c4e7b8782230d738424ff705, server=3609ad07831c,39733,1734371789085 2024-12-16T17:56:41,285 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39733 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=99318ad6c4e7b8782230d738424ff705, server=3609ad07831c,39733,1734371789085 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-16T17:56:41,285 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39733 {}] ipc.CallRunner(138): callId: 63 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40322 deadline: 1734371861282, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=99318ad6c4e7b8782230d738424ff705, server=3609ad07831c,39733,1734371789085 2024-12-16T17:56:41,285 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39733 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=99318ad6c4e7b8782230d738424ff705, server=3609ad07831c,39733,1734371789085 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-16T17:56:41,285 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39733 {}] ipc.CallRunner(138): callId: 66 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40284 deadline: 1734371861283, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=99318ad6c4e7b8782230d738424ff705, server=3609ad07831c,39733,1734371789085 2024-12-16T17:56:41,286 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39733 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=99318ad6c4e7b8782230d738424ff705, server=3609ad07831c,39733,1734371789085 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-16T17:56:41,286 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39733 {}] ipc.CallRunner(138): callId: 62 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40296 deadline: 1734371861284, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=99318ad6c4e7b8782230d738424ff705, server=3609ad07831c,39733,1734371789085 2024-12-16T17:56:41,287 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38367 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=14 2024-12-16T17:56:41,289 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39733 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=99318ad6c4e7b8782230d738424ff705, server=3609ad07831c,39733,1734371789085 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-16T17:56:41,289 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39733 {}] ipc.CallRunner(138): callId: 64 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40334 deadline: 1734371861288, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=99318ad6c4e7b8782230d738424ff705, server=3609ad07831c,39733,1734371789085 2024-12-16T17:56:41,294 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 3609ad07831c,39733,1734371789085 2024-12-16T17:56:41,294 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=39733 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=15 2024-12-16T17:56:41,294 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-0 {event_type=RS_FLUSH_REGIONS, pid=15}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1734371794518.99318ad6c4e7b8782230d738424ff705. 2024-12-16T17:56:41,295 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-0 {event_type=RS_FLUSH_REGIONS, pid=15}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1734371794518.99318ad6c4e7b8782230d738424ff705. as already flushing 2024-12-16T17:56:41,295 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-0 {event_type=RS_FLUSH_REGIONS, pid=15}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1734371794518.99318ad6c4e7b8782230d738424ff705. 2024-12-16T17:56:41,295 ERROR [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-0 {event_type=RS_FLUSH_REGIONS, pid=15}] handler.RSProcedureHandler(58): pid=15 java.io.IOException: Unable to complete flush {ENCODED => 99318ad6c4e7b8782230d738424ff705, NAME => 'TestAcidGuarantees,,1734371794518.99318ad6c4e7b8782230d738424ff705.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-16T17:56:41,295 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-0 {event_type=RS_FLUSH_REGIONS, pid=15}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=15 java.io.IOException: Unable to complete flush {ENCODED => 99318ad6c4e7b8782230d738424ff705, NAME => 'TestAcidGuarantees,,1734371794518.99318ad6c4e7b8782230d738424ff705.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-16T17:56:41,296 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38367 {}] master.HMaster(4114): Remote procedure failed, pid=15 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 99318ad6c4e7b8782230d738424ff705, NAME => 'TestAcidGuarantees,,1734371794518.99318ad6c4e7b8782230d738424ff705.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 99318ad6c4e7b8782230d738424ff705, NAME => 'TestAcidGuarantees,,1734371794518.99318ad6c4e7b8782230d738424ff705.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-16T17:56:41,374 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=26.84 KB at sequenceid=133 (bloomFilter=true), to=hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/99318ad6c4e7b8782230d738424ff705/.tmp/A/d8a6dda673a1421d8ee759326067306f 2024-12-16T17:56:41,390 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/99318ad6c4e7b8782230d738424ff705/.tmp/B/4c4d4d89e1d04455b7fd11b687c00e2f is 50, key is test_row_0/B:col10/1734371800614/Put/seqid=0 2024-12-16T17:56:41,396 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41817 is added to blk_1073741864_1040 (size=12101) 2024-12-16T17:56:41,398 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=26.84 KB at sequenceid=133 (bloomFilter=true), to=hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/99318ad6c4e7b8782230d738424ff705/.tmp/B/4c4d4d89e1d04455b7fd11b687c00e2f 2024-12-16T17:56:41,418 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/99318ad6c4e7b8782230d738424ff705/.tmp/C/307f6d1dc0ec4087b0538cd1ede94c5a is 50, key is test_row_0/C:col10/1734371800614/Put/seqid=0 2024-12-16T17:56:41,439 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41817 is added to blk_1073741865_1041 (size=12101) 2024-12-16T17:56:41,441 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=26.84 KB at sequenceid=133 (bloomFilter=true), to=hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/99318ad6c4e7b8782230d738424ff705/.tmp/C/307f6d1dc0ec4087b0538cd1ede94c5a 2024-12-16T17:56:41,447 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 3609ad07831c,39733,1734371789085 2024-12-16T17:56:41,448 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=39733 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=15 2024-12-16T17:56:41,449 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-1 {event_type=RS_FLUSH_REGIONS, pid=15}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1734371794518.99318ad6c4e7b8782230d738424ff705. 2024-12-16T17:56:41,449 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-1 {event_type=RS_FLUSH_REGIONS, pid=15}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1734371794518.99318ad6c4e7b8782230d738424ff705. as already flushing 2024-12-16T17:56:41,449 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-1 {event_type=RS_FLUSH_REGIONS, pid=15}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1734371794518.99318ad6c4e7b8782230d738424ff705. 
2024-12-16T17:56:41,449 ERROR [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-1 {event_type=RS_FLUSH_REGIONS, pid=15}] handler.RSProcedureHandler(58): pid=15 java.io.IOException: Unable to complete flush {ENCODED => 99318ad6c4e7b8782230d738424ff705, NAME => 'TestAcidGuarantees,,1734371794518.99318ad6c4e7b8782230d738424ff705.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-16T17:56:41,449 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-1 {event_type=RS_FLUSH_REGIONS, pid=15}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=15 java.io.IOException: Unable to complete flush {ENCODED => 99318ad6c4e7b8782230d738424ff705, NAME => 'TestAcidGuarantees,,1734371794518.99318ad6c4e7b8782230d738424ff705.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-16T17:56:41,450 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38367 {}] master.HMaster(4114): Remote procedure failed, pid=15 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 99318ad6c4e7b8782230d738424ff705, NAME => 'TestAcidGuarantees,,1734371794518.99318ad6c4e7b8782230d738424ff705.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 99318ad6c4e7b8782230d738424ff705, NAME => 'TestAcidGuarantees,,1734371794518.99318ad6c4e7b8782230d738424ff705.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-16T17:56:41,457 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/99318ad6c4e7b8782230d738424ff705/.tmp/A/d8a6dda673a1421d8ee759326067306f as hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/99318ad6c4e7b8782230d738424ff705/A/d8a6dda673a1421d8ee759326067306f 2024-12-16T17:56:41,468 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/99318ad6c4e7b8782230d738424ff705/A/d8a6dda673a1421d8ee759326067306f, entries=150, sequenceid=133, filesize=11.8 K 2024-12-16T17:56:41,470 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/99318ad6c4e7b8782230d738424ff705/.tmp/B/4c4d4d89e1d04455b7fd11b687c00e2f as hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/99318ad6c4e7b8782230d738424ff705/B/4c4d4d89e1d04455b7fd11b687c00e2f 2024-12-16T17:56:41,478 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/99318ad6c4e7b8782230d738424ff705/B/4c4d4d89e1d04455b7fd11b687c00e2f, entries=150, sequenceid=133, filesize=11.8 K 2024-12-16T17:56:41,480 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/99318ad6c4e7b8782230d738424ff705/.tmp/C/307f6d1dc0ec4087b0538cd1ede94c5a as 
hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/99318ad6c4e7b8782230d738424ff705/C/307f6d1dc0ec4087b0538cd1ede94c5a 2024-12-16T17:56:41,492 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/99318ad6c4e7b8782230d738424ff705/C/307f6d1dc0ec4087b0538cd1ede94c5a, entries=150, sequenceid=133, filesize=11.8 K 2024-12-16T17:56:41,494 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~80.51 KB/82440, heapSize ~211.64 KB/216720, currentSize=120.76 KB/123660 for 99318ad6c4e7b8782230d738424ff705 in 551ms, sequenceid=133, compaction requested=true 2024-12-16T17:56:41,494 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 99318ad6c4e7b8782230d738424ff705: 2024-12-16T17:56:41,494 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 99318ad6c4e7b8782230d738424ff705:A, priority=-2147483648, current under compaction store size is 1 2024-12-16T17:56:41,494 DEBUG [RS:0;3609ad07831c:39733-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-16T17:56:41,494 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-16T17:56:41,494 DEBUG [RS:0;3609ad07831c:39733-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-16T17:56:41,496 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 99318ad6c4e7b8782230d738424ff705:B, priority=-2147483648, current under compaction store size is 2 2024-12-16T17:56:41,496 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-16T17:56:41,496 DEBUG [RS:0;3609ad07831c:39733-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 36309 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-16T17:56:41,496 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 99318ad6c4e7b8782230d738424ff705:C, priority=-2147483648, current under compaction store size is 3 2024-12-16T17:56:41,496 DEBUG [RS:0;3609ad07831c:39733-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 36309 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-16T17:56:41,496 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-16T17:56:41,496 DEBUG [RS:0;3609ad07831c:39733-shortCompactions-0 {}] regionserver.HStore(1540): 99318ad6c4e7b8782230d738424ff705/A is initiating minor compaction (all files) 2024-12-16T17:56:41,496 DEBUG [RS:0;3609ad07831c:39733-longCompactions-0 {}] regionserver.HStore(1540): 99318ad6c4e7b8782230d738424ff705/B is initiating minor compaction (all files) 2024-12-16T17:56:41,496 INFO 
[RS:0;3609ad07831c:39733-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 99318ad6c4e7b8782230d738424ff705/A in TestAcidGuarantees,,1734371794518.99318ad6c4e7b8782230d738424ff705. 2024-12-16T17:56:41,496 INFO [RS:0;3609ad07831c:39733-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 99318ad6c4e7b8782230d738424ff705/B in TestAcidGuarantees,,1734371794518.99318ad6c4e7b8782230d738424ff705. 2024-12-16T17:56:41,497 INFO [RS:0;3609ad07831c:39733-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/99318ad6c4e7b8782230d738424ff705/B/87ad12d6fa474caa9e3cb9fb28257fb6, hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/99318ad6c4e7b8782230d738424ff705/B/13c44e4d2dc242dc9b01ea664127fc43, hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/99318ad6c4e7b8782230d738424ff705/B/4c4d4d89e1d04455b7fd11b687c00e2f] into tmpdir=hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/99318ad6c4e7b8782230d738424ff705/.tmp, totalSize=35.5 K 2024-12-16T17:56:41,497 INFO [RS:0;3609ad07831c:39733-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/99318ad6c4e7b8782230d738424ff705/A/48ad14bc92fe4d76800e031cc7bd4e3a, hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/99318ad6c4e7b8782230d738424ff705/A/d0b4ee1360ac4a698c6bdc3535dcdd36, hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/99318ad6c4e7b8782230d738424ff705/A/d8a6dda673a1421d8ee759326067306f] into tmpdir=hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/99318ad6c4e7b8782230d738424ff705/.tmp, totalSize=35.5 K 2024-12-16T17:56:41,497 DEBUG [RS:0;3609ad07831c:39733-shortCompactions-0 {}] compactions.Compactor(224): Compacting 48ad14bc92fe4d76800e031cc7bd4e3a, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=93, earliestPutTs=1734371799606 2024-12-16T17:56:41,498 DEBUG [RS:0;3609ad07831c:39733-shortCompactions-0 {}] compactions.Compactor(224): Compacting d0b4ee1360ac4a698c6bdc3535dcdd36, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=117, earliestPutTs=1734371800270 2024-12-16T17:56:41,498 DEBUG [RS:0;3609ad07831c:39733-longCompactions-0 {}] compactions.Compactor(224): Compacting 87ad12d6fa474caa9e3cb9fb28257fb6, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=93, earliestPutTs=1734371799606 2024-12-16T17:56:41,499 DEBUG [RS:0;3609ad07831c:39733-shortCompactions-0 {}] compactions.Compactor(224): Compacting d8a6dda673a1421d8ee759326067306f, keycount=150, bloomtype=ROW, size=11.8 K, encoding=NONE, compression=NONE, seqNum=133, earliestPutTs=1734371800614 2024-12-16T17:56:41,499 DEBUG [RS:0;3609ad07831c:39733-longCompactions-0 {}] compactions.Compactor(224): Compacting 13c44e4d2dc242dc9b01ea664127fc43, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=117, earliestPutTs=1734371800270 2024-12-16T17:56:41,500 DEBUG 
[RS:0;3609ad07831c:39733-longCompactions-0 {}] compactions.Compactor(224): Compacting 4c4d4d89e1d04455b7fd11b687c00e2f, keycount=150, bloomtype=ROW, size=11.8 K, encoding=NONE, compression=NONE, seqNum=133, earliestPutTs=1734371800614 2024-12-16T17:56:41,530 INFO [RS:0;3609ad07831c:39733-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 99318ad6c4e7b8782230d738424ff705#A#compaction#28 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 1 active operations remaining, total limit is 50.00 MB/second 2024-12-16T17:56:41,530 INFO [RS:0;3609ad07831c:39733-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 99318ad6c4e7b8782230d738424ff705#B#compaction#27 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-12-16T17:56:41,530 DEBUG [RS:0;3609ad07831c:39733-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/99318ad6c4e7b8782230d738424ff705/.tmp/A/701acd63c17c4387bcee377689bbf751 is 50, key is test_row_0/A:col10/1734371800614/Put/seqid=0 2024-12-16T17:56:41,531 DEBUG [RS:0;3609ad07831c:39733-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/99318ad6c4e7b8782230d738424ff705/.tmp/B/54defcbe352d42c08317b4f3b2a063e2 is 50, key is test_row_0/B:col10/1734371800614/Put/seqid=0 2024-12-16T17:56:41,554 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41817 is added to blk_1073741866_1042 (size=12409) 2024-12-16T17:56:41,567 DEBUG [RS:0;3609ad07831c:39733-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/99318ad6c4e7b8782230d738424ff705/.tmp/A/701acd63c17c4387bcee377689bbf751 as hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/99318ad6c4e7b8782230d738424ff705/A/701acd63c17c4387bcee377689bbf751 2024-12-16T17:56:41,574 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41817 is added to blk_1073741867_1043 (size=12409) 2024-12-16T17:56:41,581 INFO [RS:0;3609ad07831c:39733-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 99318ad6c4e7b8782230d738424ff705/A of 99318ad6c4e7b8782230d738424ff705 into 701acd63c17c4387bcee377689bbf751(size=12.1 K), total size for store is 12.1 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-12-16T17:56:41,581 DEBUG [RS:0;3609ad07831c:39733-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 99318ad6c4e7b8782230d738424ff705: 2024-12-16T17:56:41,581 INFO [RS:0;3609ad07831c:39733-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1734371794518.99318ad6c4e7b8782230d738424ff705., storeName=99318ad6c4e7b8782230d738424ff705/A, priority=13, startTime=1734371801494; duration=0sec 2024-12-16T17:56:41,581 DEBUG [RS:0;3609ad07831c:39733-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-16T17:56:41,582 DEBUG [RS:0;3609ad07831c:39733-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 99318ad6c4e7b8782230d738424ff705:A 2024-12-16T17:56:41,582 DEBUG [RS:0;3609ad07831c:39733-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-16T17:56:41,589 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38367 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=14 2024-12-16T17:56:41,592 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39733 {}] regionserver.HRegion(8581): Flush requested on 99318ad6c4e7b8782230d738424ff705 2024-12-16T17:56:41,592 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 99318ad6c4e7b8782230d738424ff705 3/3 column families, dataSize=127.47 KB heapSize=334.73 KB 2024-12-16T17:56:41,592 DEBUG [RS:0;3609ad07831c:39733-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 36309 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-16T17:56:41,592 DEBUG [RS:0;3609ad07831c:39733-shortCompactions-0 {}] regionserver.HStore(1540): 99318ad6c4e7b8782230d738424ff705/C is initiating minor compaction (all files) 2024-12-16T17:56:41,593 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 99318ad6c4e7b8782230d738424ff705, store=A 2024-12-16T17:56:41,593 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-16T17:56:41,593 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 99318ad6c4e7b8782230d738424ff705, store=B 2024-12-16T17:56:41,593 INFO [RS:0;3609ad07831c:39733-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 99318ad6c4e7b8782230d738424ff705/C in TestAcidGuarantees,,1734371794518.99318ad6c4e7b8782230d738424ff705. 
2024-12-16T17:56:41,593 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-16T17:56:41,593 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 99318ad6c4e7b8782230d738424ff705, store=C 2024-12-16T17:56:41,593 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-16T17:56:41,594 INFO [RS:0;3609ad07831c:39733-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/99318ad6c4e7b8782230d738424ff705/C/5f05729b96e54a61b2441059c8fb456e, hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/99318ad6c4e7b8782230d738424ff705/C/1d755e8160be4b84928ba95be813cf7a, hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/99318ad6c4e7b8782230d738424ff705/C/307f6d1dc0ec4087b0538cd1ede94c5a] into tmpdir=hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/99318ad6c4e7b8782230d738424ff705/.tmp, totalSize=35.5 K 2024-12-16T17:56:41,596 DEBUG [RS:0;3609ad07831c:39733-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/99318ad6c4e7b8782230d738424ff705/.tmp/B/54defcbe352d42c08317b4f3b2a063e2 as hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/99318ad6c4e7b8782230d738424ff705/B/54defcbe352d42c08317b4f3b2a063e2 2024-12-16T17:56:41,597 DEBUG [RS:0;3609ad07831c:39733-shortCompactions-0 {}] compactions.Compactor(224): Compacting 5f05729b96e54a61b2441059c8fb456e, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=93, earliestPutTs=1734371799606 2024-12-16T17:56:41,599 DEBUG [RS:0;3609ad07831c:39733-shortCompactions-0 {}] compactions.Compactor(224): Compacting 1d755e8160be4b84928ba95be813cf7a, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=117, earliestPutTs=1734371800270 2024-12-16T17:56:41,601 DEBUG [RS:0;3609ad07831c:39733-shortCompactions-0 {}] compactions.Compactor(224): Compacting 307f6d1dc0ec4087b0538cd1ede94c5a, keycount=150, bloomtype=ROW, size=11.8 K, encoding=NONE, compression=NONE, seqNum=133, earliestPutTs=1734371800614 2024-12-16T17:56:41,602 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 3609ad07831c,39733,1734371789085 2024-12-16T17:56:41,602 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/99318ad6c4e7b8782230d738424ff705/.tmp/A/3b48c4f995724275ba6d079305393ec2 is 50, key is test_row_0/A:col10/1734371801591/Put/seqid=0 2024-12-16T17:56:41,603 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=39733 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=15 2024-12-16T17:56:41,603 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-2 {event_type=RS_FLUSH_REGIONS, pid=15}] regionserver.FlushRegionCallable(51): Starting region operation on 
TestAcidGuarantees,,1734371794518.99318ad6c4e7b8782230d738424ff705. 2024-12-16T17:56:41,603 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-2 {event_type=RS_FLUSH_REGIONS, pid=15}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1734371794518.99318ad6c4e7b8782230d738424ff705. as already flushing 2024-12-16T17:56:41,603 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-2 {event_type=RS_FLUSH_REGIONS, pid=15}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1734371794518.99318ad6c4e7b8782230d738424ff705. 2024-12-16T17:56:41,603 ERROR [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-2 {event_type=RS_FLUSH_REGIONS, pid=15}] handler.RSProcedureHandler(58): pid=15 java.io.IOException: Unable to complete flush {ENCODED => 99318ad6c4e7b8782230d738424ff705, NAME => 'TestAcidGuarantees,,1734371794518.99318ad6c4e7b8782230d738424ff705.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-16T17:56:41,603 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-2 {event_type=RS_FLUSH_REGIONS, pid=15}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=15 java.io.IOException: Unable to complete flush {ENCODED => 99318ad6c4e7b8782230d738424ff705, NAME => 'TestAcidGuarantees,,1734371794518.99318ad6c4e7b8782230d738424ff705.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-16T17:56:41,604 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38367 {}] master.HMaster(4114): Remote procedure failed, pid=15 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 99318ad6c4e7b8782230d738424ff705, NAME => 'TestAcidGuarantees,,1734371794518.99318ad6c4e7b8782230d738424ff705.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 99318ad6c4e7b8782230d738424ff705, NAME => 'TestAcidGuarantees,,1734371794518.99318ad6c4e7b8782230d738424ff705.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-16T17:56:41,611 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39733 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=99318ad6c4e7b8782230d738424ff705, server=3609ad07831c,39733,1734371789085 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-16T17:56:41,612 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39733 {}] ipc.CallRunner(138): callId: 70 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40284 deadline: 1734371861606, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=99318ad6c4e7b8782230d738424ff705, server=3609ad07831c,39733,1734371789085 2024-12-16T17:56:41,615 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39733 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=99318ad6c4e7b8782230d738424ff705, server=3609ad07831c,39733,1734371789085 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-16T17:56:41,616 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39733 {}] ipc.CallRunner(138): callId: 67 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40322 deadline: 1734371861609, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=99318ad6c4e7b8782230d738424ff705, server=3609ad07831c,39733,1734371789085 2024-12-16T17:56:41,616 INFO [RS:0;3609ad07831c:39733-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 99318ad6c4e7b8782230d738424ff705/B of 99318ad6c4e7b8782230d738424ff705 into 54defcbe352d42c08317b4f3b2a063e2(size=12.1 K), total size for store is 12.1 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-12-16T17:56:41,616 DEBUG [RS:0;3609ad07831c:39733-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 99318ad6c4e7b8782230d738424ff705: 2024-12-16T17:56:41,616 INFO [RS:0;3609ad07831c:39733-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1734371794518.99318ad6c4e7b8782230d738424ff705., storeName=99318ad6c4e7b8782230d738424ff705/B, priority=13, startTime=1734371801494; duration=0sec 2024-12-16T17:56:41,616 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39733 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=99318ad6c4e7b8782230d738424ff705, server=3609ad07831c,39733,1734371789085 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-16T17:56:41,617 DEBUG [RS:0;3609ad07831c:39733-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-16T17:56:41,617 DEBUG [RS:0;3609ad07831c:39733-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 99318ad6c4e7b8782230d738424ff705:B 2024-12-16T17:56:41,617 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39733 {}] ipc.CallRunner(138): callId: 68 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40298 deadline: 1734371861610, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=99318ad6c4e7b8782230d738424ff705, server=3609ad07831c,39733,1734371789085 2024-12-16T17:56:41,617 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39733 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=99318ad6c4e7b8782230d738424ff705, server=3609ad07831c,39733,1734371789085 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-16T17:56:41,618 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39733 {}] ipc.CallRunner(138): callId: 67 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40296 deadline: 1734371861612, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=99318ad6c4e7b8782230d738424ff705, server=3609ad07831c,39733,1734371789085 2024-12-16T17:56:41,618 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39733 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=99318ad6c4e7b8782230d738424ff705, server=3609ad07831c,39733,1734371789085 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-16T17:56:41,619 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39733 {}] ipc.CallRunner(138): callId: 68 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40334 deadline: 1734371861612, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=99318ad6c4e7b8782230d738424ff705, server=3609ad07831c,39733,1734371789085 2024-12-16T17:56:41,631 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41817 is added to blk_1073741868_1044 (size=12151) 2024-12-16T17:56:41,632 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=44.73 KB at sequenceid=157 (bloomFilter=true), to=hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/99318ad6c4e7b8782230d738424ff705/.tmp/A/3b48c4f995724275ba6d079305393ec2 2024-12-16T17:56:41,646 INFO [RS:0;3609ad07831c:39733-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 99318ad6c4e7b8782230d738424ff705#C#compaction#30 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-12-16T17:56:41,647 DEBUG [RS:0;3609ad07831c:39733-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/99318ad6c4e7b8782230d738424ff705/.tmp/C/920ad4d878264c03a9890a8d5ec4edfb is 50, key is test_row_0/C:col10/1734371800614/Put/seqid=0 2024-12-16T17:56:41,659 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/99318ad6c4e7b8782230d738424ff705/.tmp/B/9d6a6d67921549e4bb83bb0cd5565eae is 50, key is test_row_0/B:col10/1734371801591/Put/seqid=0 2024-12-16T17:56:41,672 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41817 is added to blk_1073741869_1045 (size=12409) 2024-12-16T17:56:41,678 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41817 is added to blk_1073741870_1046 (size=12151) 2024-12-16T17:56:41,680 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=44.73 KB at sequenceid=157 (bloomFilter=true), to=hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/99318ad6c4e7b8782230d738424ff705/.tmp/B/9d6a6d67921549e4bb83bb0cd5565eae 2024-12-16T17:56:41,684 DEBUG [RS:0;3609ad07831c:39733-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing 
hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/99318ad6c4e7b8782230d738424ff705/.tmp/C/920ad4d878264c03a9890a8d5ec4edfb as hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/99318ad6c4e7b8782230d738424ff705/C/920ad4d878264c03a9890a8d5ec4edfb 2024-12-16T17:56:41,699 INFO [RS:0;3609ad07831c:39733-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 99318ad6c4e7b8782230d738424ff705/C of 99318ad6c4e7b8782230d738424ff705 into 920ad4d878264c03a9890a8d5ec4edfb(size=12.1 K), total size for store is 12.1 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-12-16T17:56:41,699 DEBUG [RS:0;3609ad07831c:39733-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 99318ad6c4e7b8782230d738424ff705: 2024-12-16T17:56:41,699 INFO [RS:0;3609ad07831c:39733-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1734371794518.99318ad6c4e7b8782230d738424ff705., storeName=99318ad6c4e7b8782230d738424ff705/C, priority=13, startTime=1734371801496; duration=0sec 2024-12-16T17:56:41,699 DEBUG [RS:0;3609ad07831c:39733-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-16T17:56:41,699 DEBUG [RS:0;3609ad07831c:39733-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 99318ad6c4e7b8782230d738424ff705:C 2024-12-16T17:56:41,704 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/99318ad6c4e7b8782230d738424ff705/.tmp/C/70756b4aac4c469c9e1351a0fdd5f04a is 50, key is test_row_0/C:col10/1734371801591/Put/seqid=0 2024-12-16T17:56:41,718 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41817 is added to blk_1073741871_1047 (size=12151) 2024-12-16T17:56:41,720 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39733 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=99318ad6c4e7b8782230d738424ff705, server=3609ad07831c,39733,1734371789085 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-16T17:56:41,721 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39733 {}] ipc.CallRunner(138): callId: 72 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40284 deadline: 1734371861718, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=99318ad6c4e7b8782230d738424ff705, server=3609ad07831c,39733,1734371789085 2024-12-16T17:56:41,721 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=44.73 KB at sequenceid=157 (bloomFilter=true), to=hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/99318ad6c4e7b8782230d738424ff705/.tmp/C/70756b4aac4c469c9e1351a0fdd5f04a 2024-12-16T17:56:41,721 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39733 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=99318ad6c4e7b8782230d738424ff705, server=3609ad07831c,39733,1734371789085 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-16T17:56:41,722 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39733 {}] ipc.CallRunner(138): callId: 69 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40322 deadline: 1734371861718, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=99318ad6c4e7b8782230d738424ff705, server=3609ad07831c,39733,1734371789085 2024-12-16T17:56:41,722 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39733 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=99318ad6c4e7b8782230d738424ff705, server=3609ad07831c,39733,1734371789085 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-16T17:56:41,723 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39733 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=99318ad6c4e7b8782230d738424ff705, server=3609ad07831c,39733,1734371789085 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-16T17:56:41,723 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39733 {}] ipc.CallRunner(138): callId: 70 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40298 deadline: 1734371861719, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=99318ad6c4e7b8782230d738424ff705, server=3609ad07831c,39733,1734371789085 2024-12-16T17:56:41,723 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39733 {}] ipc.CallRunner(138): callId: 69 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40296 deadline: 1734371861719, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=99318ad6c4e7b8782230d738424ff705, server=3609ad07831c,39733,1734371789085 2024-12-16T17:56:41,723 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39733 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=99318ad6c4e7b8782230d738424ff705, server=3609ad07831c,39733,1734371789085 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-16T17:56:41,724 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39733 {}] ipc.CallRunner(138): callId: 70 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40334 deadline: 1734371861721, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=99318ad6c4e7b8782230d738424ff705, server=3609ad07831c,39733,1734371789085 2024-12-16T17:56:41,736 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/99318ad6c4e7b8782230d738424ff705/.tmp/A/3b48c4f995724275ba6d079305393ec2 as hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/99318ad6c4e7b8782230d738424ff705/A/3b48c4f995724275ba6d079305393ec2 2024-12-16T17:56:41,750 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/99318ad6c4e7b8782230d738424ff705/A/3b48c4f995724275ba6d079305393ec2, entries=150, sequenceid=157, filesize=11.9 K 2024-12-16T17:56:41,753 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/99318ad6c4e7b8782230d738424ff705/.tmp/B/9d6a6d67921549e4bb83bb0cd5565eae as hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/99318ad6c4e7b8782230d738424ff705/B/9d6a6d67921549e4bb83bb0cd5565eae 2024-12-16T17:56:41,757 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 3609ad07831c,39733,1734371789085 2024-12-16T17:56:41,758 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=39733 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=15 2024-12-16T17:56:41,759 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-0 {event_type=RS_FLUSH_REGIONS, pid=15}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1734371794518.99318ad6c4e7b8782230d738424ff705. 2024-12-16T17:56:41,759 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-0 {event_type=RS_FLUSH_REGIONS, pid=15}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1734371794518.99318ad6c4e7b8782230d738424ff705. as already flushing 2024-12-16T17:56:41,759 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-0 {event_type=RS_FLUSH_REGIONS, pid=15}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1734371794518.99318ad6c4e7b8782230d738424ff705. 
2024-12-16T17:56:41,759 ERROR [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-0 {event_type=RS_FLUSH_REGIONS, pid=15}] handler.RSProcedureHandler(58): pid=15 java.io.IOException: Unable to complete flush {ENCODED => 99318ad6c4e7b8782230d738424ff705, NAME => 'TestAcidGuarantees,,1734371794518.99318ad6c4e7b8782230d738424ff705.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-16T17:56:41,759 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-0 {event_type=RS_FLUSH_REGIONS, pid=15}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=15 java.io.IOException: Unable to complete flush {ENCODED => 99318ad6c4e7b8782230d738424ff705, NAME => 'TestAcidGuarantees,,1734371794518.99318ad6c4e7b8782230d738424ff705.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-16T17:56:41,761 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38367 {}] master.HMaster(4114): Remote procedure failed, pid=15 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 99318ad6c4e7b8782230d738424ff705, NAME => 'TestAcidGuarantees,,1734371794518.99318ad6c4e7b8782230d738424ff705.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 99318ad6c4e7b8782230d738424ff705, NAME => 'TestAcidGuarantees,,1734371794518.99318ad6c4e7b8782230d738424ff705.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-16T17:56:41,767 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/99318ad6c4e7b8782230d738424ff705/B/9d6a6d67921549e4bb83bb0cd5565eae, entries=150, sequenceid=157, filesize=11.9 K 2024-12-16T17:56:41,770 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/99318ad6c4e7b8782230d738424ff705/.tmp/C/70756b4aac4c469c9e1351a0fdd5f04a as hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/99318ad6c4e7b8782230d738424ff705/C/70756b4aac4c469c9e1351a0fdd5f04a 2024-12-16T17:56:41,783 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/99318ad6c4e7b8782230d738424ff705/C/70756b4aac4c469c9e1351a0fdd5f04a, entries=150, sequenceid=157, filesize=11.9 K 2024-12-16T17:56:41,786 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~134.18 KB/137400, heapSize ~352.27 KB/360720, currentSize=67.09 KB/68700 for 99318ad6c4e7b8782230d738424ff705 in 194ms, sequenceid=157, compaction requested=false 2024-12-16T17:56:41,786 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 99318ad6c4e7b8782230d738424ff705: 2024-12-16T17:56:41,913 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 3609ad07831c,39733,1734371789085 2024-12-16T17:56:41,914 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=39733 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=15 2024-12-16T17:56:41,914 
DEBUG [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-1 {event_type=RS_FLUSH_REGIONS, pid=15}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1734371794518.99318ad6c4e7b8782230d738424ff705. 2024-12-16T17:56:41,914 INFO [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-1 {event_type=RS_FLUSH_REGIONS, pid=15}] regionserver.HRegion(2837): Flushing 99318ad6c4e7b8782230d738424ff705 3/3 column families, dataSize=67.09 KB heapSize=176.53 KB 2024-12-16T17:56:41,914 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-1 {event_type=RS_FLUSH_REGIONS, pid=15}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 99318ad6c4e7b8782230d738424ff705, store=A 2024-12-16T17:56:41,915 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-1 {event_type=RS_FLUSH_REGIONS, pid=15}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-16T17:56:41,915 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-1 {event_type=RS_FLUSH_REGIONS, pid=15}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 99318ad6c4e7b8782230d738424ff705, store=B 2024-12-16T17:56:41,915 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-1 {event_type=RS_FLUSH_REGIONS, pid=15}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-16T17:56:41,915 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-1 {event_type=RS_FLUSH_REGIONS, pid=15}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 99318ad6c4e7b8782230d738424ff705, store=C 2024-12-16T17:56:41,915 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-1 {event_type=RS_FLUSH_REGIONS, pid=15}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-16T17:56:41,922 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-1 {event_type=RS_FLUSH_REGIONS, pid=15}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/99318ad6c4e7b8782230d738424ff705/.tmp/A/b851fb47b6bb4be9ac4c137c20c14b85 is 50, key is test_row_0/A:col10/1734371801608/Put/seqid=0 2024-12-16T17:56:41,929 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1734371794518.99318ad6c4e7b8782230d738424ff705. as already flushing 2024-12-16T17:56:41,929 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39733 {}] regionserver.HRegion(8581): Flush requested on 99318ad6c4e7b8782230d738424ff705 2024-12-16T17:56:41,943 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41817 is added to blk_1073741872_1048 (size=12151) 2024-12-16T17:56:41,959 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39733 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=99318ad6c4e7b8782230d738424ff705, server=3609ad07831c,39733,1734371789085 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-16T17:56:41,960 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39733 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=99318ad6c4e7b8782230d738424ff705, server=3609ad07831c,39733,1734371789085 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-16T17:56:41,960 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39733 {}] ipc.CallRunner(138): callId: 74 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40322 deadline: 1734371861951, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=99318ad6c4e7b8782230d738424ff705, server=3609ad07831c,39733,1734371789085 2024-12-16T17:56:41,960 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39733 {}] ipc.CallRunner(138): callId: 75 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40296 deadline: 1734371861953, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=99318ad6c4e7b8782230d738424ff705, server=3609ad07831c,39733,1734371789085 2024-12-16T17:56:41,963 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39733 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=99318ad6c4e7b8782230d738424ff705, server=3609ad07831c,39733,1734371789085 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-16T17:56:41,963 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39733 {}] ipc.CallRunner(138): callId: 77 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40298 deadline: 1734371861960, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=99318ad6c4e7b8782230d738424ff705, server=3609ad07831c,39733,1734371789085 2024-12-16T17:56:41,964 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39733 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=99318ad6c4e7b8782230d738424ff705, server=3609ad07831c,39733,1734371789085 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-16T17:56:41,964 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39733 {}] ipc.CallRunner(138): callId: 78 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40284 deadline: 1734371861960, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=99318ad6c4e7b8782230d738424ff705, server=3609ad07831c,39733,1734371789085 2024-12-16T17:56:41,964 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39733 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=99318ad6c4e7b8782230d738424ff705, server=3609ad07831c,39733,1734371789085 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-16T17:56:41,965 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39733 {}] ipc.CallRunner(138): callId: 77 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40334 deadline: 1734371861960, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=99318ad6c4e7b8782230d738424ff705, server=3609ad07831c,39733,1734371789085 2024-12-16T17:56:42,064 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39733 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=99318ad6c4e7b8782230d738424ff705, server=3609ad07831c,39733,1734371789085 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-16T17:56:42,064 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39733 {}] ipc.CallRunner(138): callId: 76 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40322 deadline: 1734371862062, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=99318ad6c4e7b8782230d738424ff705, server=3609ad07831c,39733,1734371789085 2024-12-16T17:56:42,064 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39733 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=99318ad6c4e7b8782230d738424ff705, server=3609ad07831c,39733,1734371789085 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-16T17:56:42,065 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39733 {}] ipc.CallRunner(138): callId: 77 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40296 deadline: 1734371862062, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=99318ad6c4e7b8782230d738424ff705, server=3609ad07831c,39733,1734371789085 2024-12-16T17:56:42,066 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39733 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=99318ad6c4e7b8782230d738424ff705, server=3609ad07831c,39733,1734371789085 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-16T17:56:42,066 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39733 {}] ipc.CallRunner(138): callId: 79 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40298 deadline: 1734371862066, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=99318ad6c4e7b8782230d738424ff705, server=3609ad07831c,39733,1734371789085 2024-12-16T17:56:42,068 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39733 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=99318ad6c4e7b8782230d738424ff705, server=3609ad07831c,39733,1734371789085 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-16T17:56:42,069 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39733 {}] ipc.CallRunner(138): callId: 79 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40334 deadline: 1734371862067, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=99318ad6c4e7b8782230d738424ff705, server=3609ad07831c,39733,1734371789085 2024-12-16T17:56:42,069 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39733 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=99318ad6c4e7b8782230d738424ff705, server=3609ad07831c,39733,1734371789085 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-16T17:56:42,069 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39733 {}] ipc.CallRunner(138): callId: 80 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40284 deadline: 1734371862068, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=99318ad6c4e7b8782230d738424ff705, server=3609ad07831c,39733,1734371789085 2024-12-16T17:56:42,090 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38367 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=14 2024-12-16T17:56:42,268 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39733 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=99318ad6c4e7b8782230d738424ff705, server=3609ad07831c,39733,1734371789085 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-16T17:56:42,269 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39733 {}] ipc.CallRunner(138): callId: 79 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40296 deadline: 1734371862267, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=99318ad6c4e7b8782230d738424ff705, server=3609ad07831c,39733,1734371789085 2024-12-16T17:56:42,270 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39733 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=99318ad6c4e7b8782230d738424ff705, server=3609ad07831c,39733,1734371789085 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-16T17:56:42,270 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39733 {}] ipc.CallRunner(138): callId: 81 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40298 deadline: 1734371862268, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=99318ad6c4e7b8782230d738424ff705, server=3609ad07831c,39733,1734371789085 2024-12-16T17:56:42,272 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39733 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=99318ad6c4e7b8782230d738424ff705, server=3609ad07831c,39733,1734371789085 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-16T17:56:42,273 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39733 {}] ipc.CallRunner(138): callId: 78 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40322 deadline: 1734371862270, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=99318ad6c4e7b8782230d738424ff705, server=3609ad07831c,39733,1734371789085 2024-12-16T17:56:42,273 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39733 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=99318ad6c4e7b8782230d738424ff705, server=3609ad07831c,39733,1734371789085 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-16T17:56:42,273 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39733 {}] ipc.CallRunner(138): callId: 82 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40284 deadline: 1734371862271, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=99318ad6c4e7b8782230d738424ff705, server=3609ad07831c,39733,1734371789085 2024-12-16T17:56:42,273 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39733 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=99318ad6c4e7b8782230d738424ff705, server=3609ad07831c,39733,1734371789085 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-16T17:56:42,274 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39733 {}] ipc.CallRunner(138): callId: 81 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40334 deadline: 1734371862271, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=99318ad6c4e7b8782230d738424ff705, server=3609ad07831c,39733,1734371789085 2024-12-16T17:56:42,344 INFO [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-1 {event_type=RS_FLUSH_REGIONS, pid=15}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=22.36 KB at sequenceid=172 (bloomFilter=true), to=hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/99318ad6c4e7b8782230d738424ff705/.tmp/A/b851fb47b6bb4be9ac4c137c20c14b85 2024-12-16T17:56:42,360 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-1 {event_type=RS_FLUSH_REGIONS, pid=15}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/99318ad6c4e7b8782230d738424ff705/.tmp/B/7c7649317a6042bb8756e3e3a67b47f9 is 50, key is test_row_0/B:col10/1734371801608/Put/seqid=0 2024-12-16T17:56:42,378 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41817 is added to blk_1073741873_1049 (size=12151) 2024-12-16T17:56:42,379 INFO [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-1 {event_type=RS_FLUSH_REGIONS, pid=15}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=22.36 KB at sequenceid=172 (bloomFilter=true), to=hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/99318ad6c4e7b8782230d738424ff705/.tmp/B/7c7649317a6042bb8756e3e3a67b47f9 2024-12-16T17:56:42,398 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-1 {event_type=RS_FLUSH_REGIONS, pid=15}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/99318ad6c4e7b8782230d738424ff705/.tmp/C/40ebe94e2aad4dcf8cbd1ef2c208233e is 50, key is 
test_row_0/C:col10/1734371801608/Put/seqid=0 2024-12-16T17:56:42,422 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41817 is added to blk_1073741874_1050 (size=12151) 2024-12-16T17:56:42,424 INFO [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-1 {event_type=RS_FLUSH_REGIONS, pid=15}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=22.36 KB at sequenceid=172 (bloomFilter=true), to=hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/99318ad6c4e7b8782230d738424ff705/.tmp/C/40ebe94e2aad4dcf8cbd1ef2c208233e 2024-12-16T17:56:42,436 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-1 {event_type=RS_FLUSH_REGIONS, pid=15}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/99318ad6c4e7b8782230d738424ff705/.tmp/A/b851fb47b6bb4be9ac4c137c20c14b85 as hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/99318ad6c4e7b8782230d738424ff705/A/b851fb47b6bb4be9ac4c137c20c14b85 2024-12-16T17:56:42,448 INFO [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-1 {event_type=RS_FLUSH_REGIONS, pid=15}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/99318ad6c4e7b8782230d738424ff705/A/b851fb47b6bb4be9ac4c137c20c14b85, entries=150, sequenceid=172, filesize=11.9 K 2024-12-16T17:56:42,451 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-1 {event_type=RS_FLUSH_REGIONS, pid=15}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/99318ad6c4e7b8782230d738424ff705/.tmp/B/7c7649317a6042bb8756e3e3a67b47f9 as hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/99318ad6c4e7b8782230d738424ff705/B/7c7649317a6042bb8756e3e3a67b47f9 2024-12-16T17:56:42,463 INFO [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-1 {event_type=RS_FLUSH_REGIONS, pid=15}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/99318ad6c4e7b8782230d738424ff705/B/7c7649317a6042bb8756e3e3a67b47f9, entries=150, sequenceid=172, filesize=11.9 K 2024-12-16T17:56:42,467 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-1 {event_type=RS_FLUSH_REGIONS, pid=15}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/99318ad6c4e7b8782230d738424ff705/.tmp/C/40ebe94e2aad4dcf8cbd1ef2c208233e as hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/99318ad6c4e7b8782230d738424ff705/C/40ebe94e2aad4dcf8cbd1ef2c208233e 2024-12-16T17:56:42,476 INFO [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-1 {event_type=RS_FLUSH_REGIONS, pid=15}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/99318ad6c4e7b8782230d738424ff705/C/40ebe94e2aad4dcf8cbd1ef2c208233e, entries=150, sequenceid=172, filesize=11.9 K 2024-12-16T17:56:42,477 INFO [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-1 
{event_type=RS_FLUSH_REGIONS, pid=15}] regionserver.HRegion(3040): Finished flush of dataSize ~67.09 KB/68700, heapSize ~176.48 KB/180720, currentSize=140.89 KB/144270 for 99318ad6c4e7b8782230d738424ff705 in 563ms, sequenceid=172, compaction requested=true 2024-12-16T17:56:42,478 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-1 {event_type=RS_FLUSH_REGIONS, pid=15}] regionserver.HRegion(2538): Flush status journal for 99318ad6c4e7b8782230d738424ff705: 2024-12-16T17:56:42,478 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-1 {event_type=RS_FLUSH_REGIONS, pid=15}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1734371794518.99318ad6c4e7b8782230d738424ff705. 2024-12-16T17:56:42,478 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-1 {event_type=RS_FLUSH_REGIONS, pid=15}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=15 2024-12-16T17:56:42,478 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38367 {}] master.HMaster(4106): Remote procedure done, pid=15 2024-12-16T17:56:42,483 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=15, resume processing ppid=14 2024-12-16T17:56:42,484 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=15, ppid=14, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 1.4940 sec 2024-12-16T17:56:42,487 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=14, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=14, table=TestAcidGuarantees in 1.5040 sec 2024-12-16T17:56:42,575 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39733 {}] regionserver.HRegion(8581): Flush requested on 99318ad6c4e7b8782230d738424ff705 2024-12-16T17:56:42,575 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 99318ad6c4e7b8782230d738424ff705 3/3 column families, dataSize=154.31 KB heapSize=405.05 KB 2024-12-16T17:56:42,576 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 99318ad6c4e7b8782230d738424ff705, store=A 2024-12-16T17:56:42,576 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-16T17:56:42,576 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 99318ad6c4e7b8782230d738424ff705, store=B 2024-12-16T17:56:42,576 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-16T17:56:42,576 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 99318ad6c4e7b8782230d738424ff705, store=C 2024-12-16T17:56:42,576 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-16T17:56:42,587 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/99318ad6c4e7b8782230d738424ff705/.tmp/A/c67b06fb67a946a4a8400d1ff456f636 is 50, key is test_row_0/A:col10/1734371801955/Put/seqid=0 2024-12-16T17:56:42,590 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39733 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=99318ad6c4e7b8782230d738424ff705, server=3609ad07831c,39733,1734371789085 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-16T17:56:42,590 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39733 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=99318ad6c4e7b8782230d738424ff705, server=3609ad07831c,39733,1734371789085 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-16T17:56:42,590 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39733 {}] ipc.CallRunner(138): callId: 85 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40298 deadline: 1734371862584, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=99318ad6c4e7b8782230d738424ff705, server=3609ad07831c,39733,1734371789085 2024-12-16T17:56:42,591 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39733 {}] ipc.CallRunner(138): callId: 83 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40296 deadline: 1734371862584, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=99318ad6c4e7b8782230d738424ff705, server=3609ad07831c,39733,1734371789085 2024-12-16T17:56:42,593 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39733 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=99318ad6c4e7b8782230d738424ff705, server=3609ad07831c,39733,1734371789085 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-16T17:56:42,594 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39733 {}] ipc.CallRunner(138): callId: 81 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40322 deadline: 1734371862589, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=99318ad6c4e7b8782230d738424ff705, server=3609ad07831c,39733,1734371789085 2024-12-16T17:56:42,594 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39733 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=99318ad6c4e7b8782230d738424ff705, server=3609ad07831c,39733,1734371789085 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-16T17:56:42,594 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39733 {}] ipc.CallRunner(138): callId: 86 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40284 deadline: 1734371862590, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=99318ad6c4e7b8782230d738424ff705, server=3609ad07831c,39733,1734371789085 2024-12-16T17:56:42,594 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39733 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=99318ad6c4e7b8782230d738424ff705, server=3609ad07831c,39733,1734371789085 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-16T17:56:42,594 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39733 {}] ipc.CallRunner(138): callId: 85 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40334 deadline: 1734371862590, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=99318ad6c4e7b8782230d738424ff705, server=3609ad07831c,39733,1734371789085 2024-12-16T17:56:42,600 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41817 is added to blk_1073741875_1051 (size=16931) 2024-12-16T17:56:42,602 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=51.44 KB at sequenceid=198 (bloomFilter=true), to=hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/99318ad6c4e7b8782230d738424ff705/.tmp/A/c67b06fb67a946a4a8400d1ff456f636 2024-12-16T17:56:42,613 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/99318ad6c4e7b8782230d738424ff705/.tmp/B/22fcf8a783dc4409a73b1dc7f8aa7213 is 50, key is test_row_0/B:col10/1734371801955/Put/seqid=0 2024-12-16T17:56:42,643 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41817 is added to blk_1073741876_1052 (size=12151) 2024-12-16T17:56:42,645 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=51.44 KB at sequenceid=198 (bloomFilter=true), to=hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/99318ad6c4e7b8782230d738424ff705/.tmp/B/22fcf8a783dc4409a73b1dc7f8aa7213 2024-12-16T17:56:42,660 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/99318ad6c4e7b8782230d738424ff705/.tmp/C/3d5d332013304459b681ebba91dd22ff is 50, key is test_row_0/C:col10/1734371801955/Put/seqid=0 2024-12-16T17:56:42,671 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41817 is added to blk_1073741877_1053 (size=12151) 2024-12-16T17:56:42,673 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=51.44 KB at sequenceid=198 (bloomFilter=true), to=hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/99318ad6c4e7b8782230d738424ff705/.tmp/C/3d5d332013304459b681ebba91dd22ff 2024-12-16T17:56:42,682 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing 
hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/99318ad6c4e7b8782230d738424ff705/.tmp/A/c67b06fb67a946a4a8400d1ff456f636 as hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/99318ad6c4e7b8782230d738424ff705/A/c67b06fb67a946a4a8400d1ff456f636 2024-12-16T17:56:42,691 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/99318ad6c4e7b8782230d738424ff705/A/c67b06fb67a946a4a8400d1ff456f636, entries=250, sequenceid=198, filesize=16.5 K 2024-12-16T17:56:42,693 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/99318ad6c4e7b8782230d738424ff705/.tmp/B/22fcf8a783dc4409a73b1dc7f8aa7213 as hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/99318ad6c4e7b8782230d738424ff705/B/22fcf8a783dc4409a73b1dc7f8aa7213 2024-12-16T17:56:42,700 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/99318ad6c4e7b8782230d738424ff705/B/22fcf8a783dc4409a73b1dc7f8aa7213, entries=150, sequenceid=198, filesize=11.9 K 2024-12-16T17:56:42,702 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/99318ad6c4e7b8782230d738424ff705/.tmp/C/3d5d332013304459b681ebba91dd22ff as hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/99318ad6c4e7b8782230d738424ff705/C/3d5d332013304459b681ebba91dd22ff 2024-12-16T17:56:42,702 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39733 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=99318ad6c4e7b8782230d738424ff705, server=3609ad07831c,39733,1734371789085 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-16T17:56:42,702 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39733 {}] ipc.CallRunner(138): callId: 85 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40296 deadline: 1734371862695, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=99318ad6c4e7b8782230d738424ff705, server=3609ad07831c,39733,1734371789085 2024-12-16T17:56:42,703 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39733 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=99318ad6c4e7b8782230d738424ff705, server=3609ad07831c,39733,1734371789085 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-16T17:56:42,703 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39733 {}] ipc.CallRunner(138): callId: 87 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40298 deadline: 1734371862695, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=99318ad6c4e7b8782230d738424ff705, server=3609ad07831c,39733,1734371789085 2024-12-16T17:56:42,703 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39733 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=99318ad6c4e7b8782230d738424ff705, server=3609ad07831c,39733,1734371789085 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-16T17:56:42,704 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39733 {}] ipc.CallRunner(138): callId: 88 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40284 deadline: 1734371862696, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=99318ad6c4e7b8782230d738424ff705, server=3609ad07831c,39733,1734371789085 2024-12-16T17:56:42,704 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39733 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=99318ad6c4e7b8782230d738424ff705, server=3609ad07831c,39733,1734371789085 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-16T17:56:42,704 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39733 {}] ipc.CallRunner(138): callId: 83 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40322 deadline: 1734371862696, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=99318ad6c4e7b8782230d738424ff705, server=3609ad07831c,39733,1734371789085 2024-12-16T17:56:42,704 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39733 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=99318ad6c4e7b8782230d738424ff705, server=3609ad07831c,39733,1734371789085 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-16T17:56:42,705 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39733 {}] ipc.CallRunner(138): callId: 87 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40334 deadline: 1734371862696, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=99318ad6c4e7b8782230d738424ff705, server=3609ad07831c,39733,1734371789085 2024-12-16T17:56:42,710 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/99318ad6c4e7b8782230d738424ff705/C/3d5d332013304459b681ebba91dd22ff, entries=150, sequenceid=198, filesize=11.9 K 2024-12-16T17:56:42,712 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~154.31 KB/158010, heapSize ~405 KB/414720, currentSize=46.96 KB/48090 for 99318ad6c4e7b8782230d738424ff705 in 137ms, sequenceid=198, compaction requested=true 2024-12-16T17:56:42,712 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 99318ad6c4e7b8782230d738424ff705: 2024-12-16T17:56:42,712 DEBUG [RS:0;3609ad07831c:39733-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 4 store files, 0 compacting, 4 eligible, 16 blocking 2024-12-16T17:56:42,713 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 99318ad6c4e7b8782230d738424ff705:A, priority=-2147483648, current under compaction store size is 1 2024-12-16T17:56:42,713 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-16T17:56:42,713 DEBUG [RS:0;3609ad07831c:39733-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 4 store files, 0 compacting, 4 eligible, 16 blocking 2024-12-16T17:56:42,713 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 99318ad6c4e7b8782230d738424ff705:B, priority=-2147483648, current under compaction store size is 2 2024-12-16T17:56:42,713 DEBUG [MemStoreFlusher.0 {}] 
regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-16T17:56:42,713 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 99318ad6c4e7b8782230d738424ff705:C, priority=-2147483648, current under compaction store size is 3 2024-12-16T17:56:42,713 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-16T17:56:42,714 DEBUG [RS:0;3609ad07831c:39733-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 4 files of size 53642 starting at candidate #0 after considering 3 permutations with 3 in ratio 2024-12-16T17:56:42,715 DEBUG [RS:0;3609ad07831c:39733-shortCompactions-0 {}] regionserver.HStore(1540): 99318ad6c4e7b8782230d738424ff705/A is initiating minor compaction (all files) 2024-12-16T17:56:42,715 INFO [RS:0;3609ad07831c:39733-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 99318ad6c4e7b8782230d738424ff705/A in TestAcidGuarantees,,1734371794518.99318ad6c4e7b8782230d738424ff705. 2024-12-16T17:56:42,715 INFO [RS:0;3609ad07831c:39733-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/99318ad6c4e7b8782230d738424ff705/A/701acd63c17c4387bcee377689bbf751, hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/99318ad6c4e7b8782230d738424ff705/A/3b48c4f995724275ba6d079305393ec2, hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/99318ad6c4e7b8782230d738424ff705/A/b851fb47b6bb4be9ac4c137c20c14b85, hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/99318ad6c4e7b8782230d738424ff705/A/c67b06fb67a946a4a8400d1ff456f636] into tmpdir=hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/99318ad6c4e7b8782230d738424ff705/.tmp, totalSize=52.4 K 2024-12-16T17:56:42,716 DEBUG [RS:0;3609ad07831c:39733-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 4 files of size 48862 starting at candidate #0 after considering 3 permutations with 3 in ratio 2024-12-16T17:56:42,716 DEBUG [RS:0;3609ad07831c:39733-longCompactions-0 {}] regionserver.HStore(1540): 99318ad6c4e7b8782230d738424ff705/B is initiating minor compaction (all files) 2024-12-16T17:56:42,716 INFO [RS:0;3609ad07831c:39733-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 99318ad6c4e7b8782230d738424ff705/B in TestAcidGuarantees,,1734371794518.99318ad6c4e7b8782230d738424ff705. 
2024-12-16T17:56:42,716 INFO [RS:0;3609ad07831c:39733-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/99318ad6c4e7b8782230d738424ff705/B/54defcbe352d42c08317b4f3b2a063e2, hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/99318ad6c4e7b8782230d738424ff705/B/9d6a6d67921549e4bb83bb0cd5565eae, hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/99318ad6c4e7b8782230d738424ff705/B/7c7649317a6042bb8756e3e3a67b47f9, hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/99318ad6c4e7b8782230d738424ff705/B/22fcf8a783dc4409a73b1dc7f8aa7213] into tmpdir=hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/99318ad6c4e7b8782230d738424ff705/.tmp, totalSize=47.7 K 2024-12-16T17:56:42,717 DEBUG [RS:0;3609ad07831c:39733-shortCompactions-0 {}] compactions.Compactor(224): Compacting 701acd63c17c4387bcee377689bbf751, keycount=150, bloomtype=ROW, size=12.1 K, encoding=NONE, compression=NONE, seqNum=133, earliestPutTs=1734371800614 2024-12-16T17:56:42,717 DEBUG [RS:0;3609ad07831c:39733-longCompactions-0 {}] compactions.Compactor(224): Compacting 54defcbe352d42c08317b4f3b2a063e2, keycount=150, bloomtype=ROW, size=12.1 K, encoding=NONE, compression=NONE, seqNum=133, earliestPutTs=1734371800614 2024-12-16T17:56:42,718 DEBUG [RS:0;3609ad07831c:39733-shortCompactions-0 {}] compactions.Compactor(224): Compacting 3b48c4f995724275ba6d079305393ec2, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=157, earliestPutTs=1734371800965 2024-12-16T17:56:42,718 DEBUG [RS:0;3609ad07831c:39733-longCompactions-0 {}] compactions.Compactor(224): Compacting 9d6a6d67921549e4bb83bb0cd5565eae, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=157, earliestPutTs=1734371800965 2024-12-16T17:56:42,718 DEBUG [RS:0;3609ad07831c:39733-shortCompactions-0 {}] compactions.Compactor(224): Compacting b851fb47b6bb4be9ac4c137c20c14b85, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=172, earliestPutTs=1734371801608 2024-12-16T17:56:42,719 DEBUG [RS:0;3609ad07831c:39733-longCompactions-0 {}] compactions.Compactor(224): Compacting 7c7649317a6042bb8756e3e3a67b47f9, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=172, earliestPutTs=1734371801608 2024-12-16T17:56:42,719 DEBUG [RS:0;3609ad07831c:39733-shortCompactions-0 {}] compactions.Compactor(224): Compacting c67b06fb67a946a4a8400d1ff456f636, keycount=250, bloomtype=ROW, size=16.5 K, encoding=NONE, compression=NONE, seqNum=198, earliestPutTs=1734371801955 2024-12-16T17:56:42,719 DEBUG [RS:0;3609ad07831c:39733-longCompactions-0 {}] compactions.Compactor(224): Compacting 22fcf8a783dc4409a73b1dc7f8aa7213, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=198, earliestPutTs=1734371801955 2024-12-16T17:56:42,740 INFO [RS:0;3609ad07831c:39733-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 99318ad6c4e7b8782230d738424ff705#B#compaction#39 average throughput is 1.09 MB/second, slept 0 time(s) and total slept time is 0 ms. 
1 active operations remaining, total limit is 50.00 MB/second 2024-12-16T17:56:42,741 INFO [RS:0;3609ad07831c:39733-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 99318ad6c4e7b8782230d738424ff705#A#compaction#40 average throughput is 1.09 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-12-16T17:56:42,741 DEBUG [RS:0;3609ad07831c:39733-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/99318ad6c4e7b8782230d738424ff705/.tmp/B/e384afab16b34fd3b855efd42db768d8 is 50, key is test_row_0/B:col10/1734371801955/Put/seqid=0 2024-12-16T17:56:42,741 DEBUG [RS:0;3609ad07831c:39733-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/99318ad6c4e7b8782230d738424ff705/.tmp/A/dd7976d569124dd7a7746eaa362becb7 is 50, key is test_row_0/A:col10/1734371801955/Put/seqid=0 2024-12-16T17:56:42,759 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41817 is added to blk_1073741878_1054 (size=12595) 2024-12-16T17:56:42,770 DEBUG [RS:0;3609ad07831c:39733-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/99318ad6c4e7b8782230d738424ff705/.tmp/B/e384afab16b34fd3b855efd42db768d8 as hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/99318ad6c4e7b8782230d738424ff705/B/e384afab16b34fd3b855efd42db768d8 2024-12-16T17:56:42,780 INFO [RS:0;3609ad07831c:39733-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 4 (all) file(s) in 99318ad6c4e7b8782230d738424ff705/B of 99318ad6c4e7b8782230d738424ff705 into e384afab16b34fd3b855efd42db768d8(size=12.3 K), total size for store is 12.3 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-12-16T17:56:42,780 DEBUG [RS:0;3609ad07831c:39733-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 99318ad6c4e7b8782230d738424ff705: 2024-12-16T17:56:42,780 INFO [RS:0;3609ad07831c:39733-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1734371794518.99318ad6c4e7b8782230d738424ff705., storeName=99318ad6c4e7b8782230d738424ff705/B, priority=12, startTime=1734371802713; duration=0sec 2024-12-16T17:56:42,781 DEBUG [RS:0;3609ad07831c:39733-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-16T17:56:42,781 DEBUG [RS:0;3609ad07831c:39733-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 99318ad6c4e7b8782230d738424ff705:B 2024-12-16T17:56:42,781 DEBUG [RS:0;3609ad07831c:39733-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 4 store files, 0 compacting, 4 eligible, 16 blocking 2024-12-16T17:56:42,783 DEBUG [RS:0;3609ad07831c:39733-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 4 files of size 48862 starting at candidate #0 after considering 3 permutations with 3 in ratio 2024-12-16T17:56:42,783 DEBUG [RS:0;3609ad07831c:39733-longCompactions-0 {}] regionserver.HStore(1540): 99318ad6c4e7b8782230d738424ff705/C is initiating minor compaction (all files) 2024-12-16T17:56:42,784 INFO [RS:0;3609ad07831c:39733-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 99318ad6c4e7b8782230d738424ff705/C in TestAcidGuarantees,,1734371794518.99318ad6c4e7b8782230d738424ff705. 2024-12-16T17:56:42,785 INFO [RS:0;3609ad07831c:39733-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/99318ad6c4e7b8782230d738424ff705/C/920ad4d878264c03a9890a8d5ec4edfb, hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/99318ad6c4e7b8782230d738424ff705/C/70756b4aac4c469c9e1351a0fdd5f04a, hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/99318ad6c4e7b8782230d738424ff705/C/40ebe94e2aad4dcf8cbd1ef2c208233e, hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/99318ad6c4e7b8782230d738424ff705/C/3d5d332013304459b681ebba91dd22ff] into tmpdir=hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/99318ad6c4e7b8782230d738424ff705/.tmp, totalSize=47.7 K 2024-12-16T17:56:42,786 DEBUG [RS:0;3609ad07831c:39733-longCompactions-0 {}] compactions.Compactor(224): Compacting 920ad4d878264c03a9890a8d5ec4edfb, keycount=150, bloomtype=ROW, size=12.1 K, encoding=NONE, compression=NONE, seqNum=133, earliestPutTs=1734371800614 2024-12-16T17:56:42,787 DEBUG [RS:0;3609ad07831c:39733-longCompactions-0 {}] compactions.Compactor(224): Compacting 70756b4aac4c469c9e1351a0fdd5f04a, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=157, earliestPutTs=1734371800965 2024-12-16T17:56:42,787 DEBUG [RS:0;3609ad07831c:39733-longCompactions-0 {}] compactions.Compactor(224): Compacting 40ebe94e2aad4dcf8cbd1ef2c208233e, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, 
compression=NONE, seqNum=172, earliestPutTs=1734371801608 2024-12-16T17:56:42,788 DEBUG [RS:0;3609ad07831c:39733-longCompactions-0 {}] compactions.Compactor(224): Compacting 3d5d332013304459b681ebba91dd22ff, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=198, earliestPutTs=1734371801955 2024-12-16T17:56:42,790 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41817 is added to blk_1073741879_1055 (size=12595) 2024-12-16T17:56:42,799 DEBUG [RS:0;3609ad07831c:39733-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/99318ad6c4e7b8782230d738424ff705/.tmp/A/dd7976d569124dd7a7746eaa362becb7 as hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/99318ad6c4e7b8782230d738424ff705/A/dd7976d569124dd7a7746eaa362becb7 2024-12-16T17:56:42,809 INFO [RS:0;3609ad07831c:39733-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 4 (all) file(s) in 99318ad6c4e7b8782230d738424ff705/A of 99318ad6c4e7b8782230d738424ff705 into dd7976d569124dd7a7746eaa362becb7(size=12.3 K), total size for store is 12.3 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-12-16T17:56:42,809 DEBUG [RS:0;3609ad07831c:39733-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 99318ad6c4e7b8782230d738424ff705: 2024-12-16T17:56:42,809 INFO [RS:0;3609ad07831c:39733-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1734371794518.99318ad6c4e7b8782230d738424ff705., storeName=99318ad6c4e7b8782230d738424ff705/A, priority=12, startTime=1734371802712; duration=0sec 2024-12-16T17:56:42,809 DEBUG [RS:0;3609ad07831c:39733-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-16T17:56:42,809 DEBUG [RS:0;3609ad07831c:39733-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 99318ad6c4e7b8782230d738424ff705:A 2024-12-16T17:56:42,819 INFO [RS:0;3609ad07831c:39733-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 99318ad6c4e7b8782230d738424ff705#C#compaction#41 average throughput is 1.09 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-12-16T17:56:42,820 DEBUG [RS:0;3609ad07831c:39733-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/99318ad6c4e7b8782230d738424ff705/.tmp/C/d97c841578a540cf845fd7c40280017b is 50, key is test_row_0/C:col10/1734371801955/Put/seqid=0 2024-12-16T17:56:42,840 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41817 is added to blk_1073741880_1056 (size=12595) 2024-12-16T17:56:42,850 DEBUG [RS:0;3609ad07831c:39733-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/99318ad6c4e7b8782230d738424ff705/.tmp/C/d97c841578a540cf845fd7c40280017b as hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/99318ad6c4e7b8782230d738424ff705/C/d97c841578a540cf845fd7c40280017b 2024-12-16T17:56:42,863 INFO [RS:0;3609ad07831c:39733-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 4 (all) file(s) in 99318ad6c4e7b8782230d738424ff705/C of 99318ad6c4e7b8782230d738424ff705 into d97c841578a540cf845fd7c40280017b(size=12.3 K), total size for store is 12.3 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-12-16T17:56:42,863 DEBUG [RS:0;3609ad07831c:39733-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 99318ad6c4e7b8782230d738424ff705: 2024-12-16T17:56:42,863 INFO [RS:0;3609ad07831c:39733-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1734371794518.99318ad6c4e7b8782230d738424ff705., storeName=99318ad6c4e7b8782230d738424ff705/C, priority=12, startTime=1734371802713; duration=0sec 2024-12-16T17:56:42,864 DEBUG [RS:0;3609ad07831c:39733-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-16T17:56:42,864 DEBUG [RS:0;3609ad07831c:39733-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 99318ad6c4e7b8782230d738424ff705:C 2024-12-16T17:56:42,908 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39733 {}] regionserver.HRegion(8581): Flush requested on 99318ad6c4e7b8782230d738424ff705 2024-12-16T17:56:42,908 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 99318ad6c4e7b8782230d738424ff705 3/3 column families, dataSize=60.38 KB heapSize=158.95 KB 2024-12-16T17:56:42,910 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 99318ad6c4e7b8782230d738424ff705, store=A 2024-12-16T17:56:42,910 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-16T17:56:42,910 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 99318ad6c4e7b8782230d738424ff705, store=B 2024-12-16T17:56:42,910 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-16T17:56:42,910 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 99318ad6c4e7b8782230d738424ff705, store=C 2024-12-16T17:56:42,910 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): 
Swapping pipeline suffix; before=1, new segment=null 2024-12-16T17:56:42,919 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/99318ad6c4e7b8782230d738424ff705/.tmp/A/1b99178b095441679aa4d2c74989a155 is 50, key is test_row_0/A:col10/1734371802908/Put/seqid=0 2024-12-16T17:56:42,926 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41817 is added to blk_1073741881_1057 (size=16927) 2024-12-16T17:56:42,928 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=22.36 KB at sequenceid=214 (bloomFilter=true), to=hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/99318ad6c4e7b8782230d738424ff705/.tmp/A/1b99178b095441679aa4d2c74989a155 2024-12-16T17:56:42,945 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/99318ad6c4e7b8782230d738424ff705/.tmp/B/7f6b1ee7cace436b9ffa9007d42e850e is 50, key is test_row_0/B:col10/1734371802908/Put/seqid=0 2024-12-16T17:56:42,957 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39733 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=99318ad6c4e7b8782230d738424ff705, server=3609ad07831c,39733,1734371789085 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-16T17:56:42,958 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39733 {}] ipc.CallRunner(138): callId: 94 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40298 deadline: 1734371862952, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=99318ad6c4e7b8782230d738424ff705, server=3609ad07831c,39733,1734371789085 2024-12-16T17:56:42,958 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39733 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=99318ad6c4e7b8782230d738424ff705, server=3609ad07831c,39733,1734371789085 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-16T17:56:42,958 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39733 {}] ipc.CallRunner(138): callId: 89 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40322 deadline: 1734371862952, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=99318ad6c4e7b8782230d738424ff705, server=3609ad07831c,39733,1734371789085 2024-12-16T17:56:42,962 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41817 is added to blk_1073741882_1058 (size=9757) 2024-12-16T17:56:42,963 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=22.36 KB at sequenceid=214 (bloomFilter=true), to=hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/99318ad6c4e7b8782230d738424ff705/.tmp/B/7f6b1ee7cace436b9ffa9007d42e850e 2024-12-16T17:56:42,963 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39733 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=99318ad6c4e7b8782230d738424ff705, server=3609ad07831c,39733,1734371789085 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-16T17:56:42,964 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39733 {}] ipc.CallRunner(138): callId: 94 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40334 deadline: 1734371862958, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=99318ad6c4e7b8782230d738424ff705, server=3609ad07831c,39733,1734371789085 2024-12-16T17:56:42,964 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39733 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=99318ad6c4e7b8782230d738424ff705, server=3609ad07831c,39733,1734371789085 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-16T17:56:42,965 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39733 {}] ipc.CallRunner(138): callId: 92 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40296 deadline: 1734371862958, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=99318ad6c4e7b8782230d738424ff705, server=3609ad07831c,39733,1734371789085 2024-12-16T17:56:42,965 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39733 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=99318ad6c4e7b8782230d738424ff705, server=3609ad07831c,39733,1734371789085 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-16T17:56:42,965 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39733 {}] ipc.CallRunner(138): callId: 95 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40284 deadline: 1734371862958, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=99318ad6c4e7b8782230d738424ff705, server=3609ad07831c,39733,1734371789085 2024-12-16T17:56:42,976 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/99318ad6c4e7b8782230d738424ff705/.tmp/C/2bcabae6955648f0a29e5ef3b9ef4555 is 50, key is test_row_0/C:col10/1734371802908/Put/seqid=0 2024-12-16T17:56:42,988 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41817 is added to blk_1073741883_1059 (size=9757) 2024-12-16T17:56:42,989 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=22.36 KB at sequenceid=214 (bloomFilter=true), to=hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/99318ad6c4e7b8782230d738424ff705/.tmp/C/2bcabae6955648f0a29e5ef3b9ef4555 2024-12-16T17:56:42,999 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/99318ad6c4e7b8782230d738424ff705/.tmp/A/1b99178b095441679aa4d2c74989a155 as hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/99318ad6c4e7b8782230d738424ff705/A/1b99178b095441679aa4d2c74989a155 2024-12-16T17:56:43,011 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/99318ad6c4e7b8782230d738424ff705/A/1b99178b095441679aa4d2c74989a155, entries=250, sequenceid=214, filesize=16.5 K 2024-12-16T17:56:43,014 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/99318ad6c4e7b8782230d738424ff705/.tmp/B/7f6b1ee7cace436b9ffa9007d42e850e as hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/99318ad6c4e7b8782230d738424ff705/B/7f6b1ee7cace436b9ffa9007d42e850e 2024-12-16T17:56:43,023 
INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/99318ad6c4e7b8782230d738424ff705/B/7f6b1ee7cace436b9ffa9007d42e850e, entries=100, sequenceid=214, filesize=9.5 K 2024-12-16T17:56:43,024 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/99318ad6c4e7b8782230d738424ff705/.tmp/C/2bcabae6955648f0a29e5ef3b9ef4555 as hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/99318ad6c4e7b8782230d738424ff705/C/2bcabae6955648f0a29e5ef3b9ef4555 2024-12-16T17:56:43,034 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/99318ad6c4e7b8782230d738424ff705/C/2bcabae6955648f0a29e5ef3b9ef4555, entries=100, sequenceid=214, filesize=9.5 K 2024-12-16T17:56:43,036 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~67.09 KB/68700, heapSize ~176.48 KB/180720, currentSize=140.89 KB/144270 for 99318ad6c4e7b8782230d738424ff705 in 127ms, sequenceid=214, compaction requested=false 2024-12-16T17:56:43,036 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 99318ad6c4e7b8782230d738424ff705: 2024-12-16T17:56:43,063 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39733 {}] regionserver.HRegion(8581): Flush requested on 99318ad6c4e7b8782230d738424ff705 2024-12-16T17:56:43,063 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 99318ad6c4e7b8782230d738424ff705 3/3 column families, dataSize=147.60 KB heapSize=387.47 KB 2024-12-16T17:56:43,063 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 99318ad6c4e7b8782230d738424ff705, store=A 2024-12-16T17:56:43,063 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-16T17:56:43,064 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 99318ad6c4e7b8782230d738424ff705, store=B 2024-12-16T17:56:43,064 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-16T17:56:43,064 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 99318ad6c4e7b8782230d738424ff705, store=C 2024-12-16T17:56:43,064 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-16T17:56:43,071 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/99318ad6c4e7b8782230d738424ff705/.tmp/A/18639d61e1b94d1a96ffaa942f004997 is 50, key is test_row_0/A:col10/1734371803062/Put/seqid=0 2024-12-16T17:56:43,081 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39733 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=99318ad6c4e7b8782230d738424ff705, server=3609ad07831c,39733,1734371789085 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-16T17:56:43,082 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39733 {}] ipc.CallRunner(138): callId: 95 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40296 deadline: 1734371863075, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=99318ad6c4e7b8782230d738424ff705, server=3609ad07831c,39733,1734371789085 2024-12-16T17:56:43,082 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39733 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=99318ad6c4e7b8782230d738424ff705, server=3609ad07831c,39733,1734371789085 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-16T17:56:43,082 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39733 {}] ipc.CallRunner(138): callId: 98 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40284 deadline: 1734371863076, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=99318ad6c4e7b8782230d738424ff705, server=3609ad07831c,39733,1734371789085 2024-12-16T17:56:43,083 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39733 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=99318ad6c4e7b8782230d738424ff705, server=3609ad07831c,39733,1734371789085 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-16T17:56:43,083 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39733 {}] ipc.CallRunner(138): callId: 94 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40322 deadline: 1734371863077, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=99318ad6c4e7b8782230d738424ff705, server=3609ad07831c,39733,1734371789085 2024-12-16T17:56:43,083 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39733 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=99318ad6c4e7b8782230d738424ff705, server=3609ad07831c,39733,1734371789085 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-16T17:56:43,083 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39733 {}] ipc.CallRunner(138): callId: 99 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40298 deadline: 1734371863081, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=99318ad6c4e7b8782230d738424ff705, server=3609ad07831c,39733,1734371789085 2024-12-16T17:56:43,088 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39733 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=99318ad6c4e7b8782230d738424ff705, server=3609ad07831c,39733,1734371789085 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-16T17:56:43,088 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39733 {}] ipc.CallRunner(138): callId: 98 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40334 deadline: 1734371863083, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=99318ad6c4e7b8782230d738424ff705, server=3609ad07831c,39733,1734371789085 2024-12-16T17:56:43,091 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38367 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=14 2024-12-16T17:56:43,092 INFO [Thread-159 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 14 completed 2024-12-16T17:56:43,094 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38367 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-12-16T17:56:43,094 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41817 is added to blk_1073741884_1060 (size=16931) 2024-12-16T17:56:43,096 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38367 {}] procedure2.ProcedureExecutor(1098): Stored pid=16, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=16, table=TestAcidGuarantees 2024-12-16T17:56:43,096 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=51.44 KB at sequenceid=240 (bloomFilter=true), to=hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/99318ad6c4e7b8782230d738424ff705/.tmp/A/18639d61e1b94d1a96ffaa942f004997 2024-12-16T17:56:43,097 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38367 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=16 2024-12-16T17:56:43,098 INFO [PEWorker-3 {}] procedure.FlushTableProcedure(91): pid=16, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=16, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-12-16T17:56:43,099 INFO [PEWorker-3 {}] procedure.FlushTableProcedure(91): pid=16, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=16, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-12-16T17:56:43,100 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=17, ppid=16, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-12-16T17:56:43,115 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/99318ad6c4e7b8782230d738424ff705/.tmp/B/edab2f3dc1a14829aed8187ebdc18bb8 is 50, key is test_row_0/B:col10/1734371803062/Put/seqid=0 
2024-12-16T17:56:43,139 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41817 is added to blk_1073741885_1061 (size=12151) 2024-12-16T17:56:43,185 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39733 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=99318ad6c4e7b8782230d738424ff705, server=3609ad07831c,39733,1734371789085 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-16T17:56:43,186 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39733 {}] ipc.CallRunner(138): callId: 97 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40296 deadline: 1734371863183, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=99318ad6c4e7b8782230d738424ff705, server=3609ad07831c,39733,1734371789085 2024-12-16T17:56:43,186 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39733 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=99318ad6c4e7b8782230d738424ff705, server=3609ad07831c,39733,1734371789085 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-16T17:56:43,187 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39733 {}] ipc.CallRunner(138): callId: 100 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40284 deadline: 1734371863184, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=99318ad6c4e7b8782230d738424ff705, server=3609ad07831c,39733,1734371789085 2024-12-16T17:56:43,187 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39733 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=99318ad6c4e7b8782230d738424ff705, server=3609ad07831c,39733,1734371789085 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-16T17:56:43,187 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39733 {}] ipc.CallRunner(138): callId: 96 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40322 deadline: 1734371863185, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=99318ad6c4e7b8782230d738424ff705, server=3609ad07831c,39733,1734371789085 2024-12-16T17:56:43,189 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39733 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=99318ad6c4e7b8782230d738424ff705, server=3609ad07831c,39733,1734371789085 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-16T17:56:43,190 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39733 {}] ipc.CallRunner(138): callId: 101 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40298 deadline: 1734371863188, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=99318ad6c4e7b8782230d738424ff705, server=3609ad07831c,39733,1734371789085 2024-12-16T17:56:43,190 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39733 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=99318ad6c4e7b8782230d738424ff705, server=3609ad07831c,39733,1734371789085 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-16T17:56:43,191 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39733 {}] ipc.CallRunner(138): callId: 100 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40334 deadline: 1734371863190, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=99318ad6c4e7b8782230d738424ff705, server=3609ad07831c,39733,1734371789085 2024-12-16T17:56:43,199 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38367 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=16 2024-12-16T17:56:43,267 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 3609ad07831c,39733,1734371789085 2024-12-16T17:56:43,268 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=39733 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=17 2024-12-16T17:56:43,268 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-2 {event_type=RS_FLUSH_REGIONS, pid=17}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1734371794518.99318ad6c4e7b8782230d738424ff705. 2024-12-16T17:56:43,268 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-2 {event_type=RS_FLUSH_REGIONS, pid=17}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1734371794518.99318ad6c4e7b8782230d738424ff705. as already flushing 2024-12-16T17:56:43,269 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-2 {event_type=RS_FLUSH_REGIONS, pid=17}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1734371794518.99318ad6c4e7b8782230d738424ff705. 2024-12-16T17:56:43,269 ERROR [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-2 {event_type=RS_FLUSH_REGIONS, pid=17}] handler.RSProcedureHandler(58): pid=17 java.io.IOException: Unable to complete flush {ENCODED => 99318ad6c4e7b8782230d738424ff705, NAME => 'TestAcidGuarantees,,1734371794518.99318ad6c4e7b8782230d738424ff705.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-16T17:56:43,269 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-2 {event_type=RS_FLUSH_REGIONS, pid=17}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=17 java.io.IOException: Unable to complete flush {ENCODED => 99318ad6c4e7b8782230d738424ff705, NAME => 'TestAcidGuarantees,,1734371794518.99318ad6c4e7b8782230d738424ff705.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-16T17:56:43,270 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38367 {}] master.HMaster(4114): Remote procedure failed, pid=17 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 99318ad6c4e7b8782230d738424ff705, NAME => 'TestAcidGuarantees,,1734371794518.99318ad6c4e7b8782230d738424ff705.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 99318ad6c4e7b8782230d738424ff705, NAME => 'TestAcidGuarantees,,1734371794518.99318ad6c4e7b8782230d738424ff705.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-16T17:56:43,390 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39733 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=99318ad6c4e7b8782230d738424ff705, server=3609ad07831c,39733,1734371789085 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-16T17:56:43,390 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39733 {}] ipc.CallRunner(138): callId: 99 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40296 deadline: 1734371863389, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=99318ad6c4e7b8782230d738424ff705, server=3609ad07831c,39733,1734371789085 2024-12-16T17:56:43,391 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39733 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=99318ad6c4e7b8782230d738424ff705, server=3609ad07831c,39733,1734371789085 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-16T17:56:43,391 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39733 {}] ipc.CallRunner(138): callId: 102 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40284 deadline: 1734371863390, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=99318ad6c4e7b8782230d738424ff705, server=3609ad07831c,39733,1734371789085 2024-12-16T17:56:43,394 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39733 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=99318ad6c4e7b8782230d738424ff705, server=3609ad07831c,39733,1734371789085 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-16T17:56:43,394 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39733 {}] ipc.CallRunner(138): callId: 98 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40322 deadline: 1734371863391, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=99318ad6c4e7b8782230d738424ff705, server=3609ad07831c,39733,1734371789085 2024-12-16T17:56:43,394 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39733 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=99318ad6c4e7b8782230d738424ff705, server=3609ad07831c,39733,1734371789085 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-16T17:56:43,395 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39733 {}] ipc.CallRunner(138): callId: 103 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40298 deadline: 1734371863392, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=99318ad6c4e7b8782230d738424ff705, server=3609ad07831c,39733,1734371789085 2024-12-16T17:56:43,397 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39733 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=99318ad6c4e7b8782230d738424ff705, server=3609ad07831c,39733,1734371789085 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-16T17:56:43,397 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39733 {}] ipc.CallRunner(138): callId: 102 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40334 deadline: 1734371863396, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=99318ad6c4e7b8782230d738424ff705, server=3609ad07831c,39733,1734371789085 2024-12-16T17:56:43,400 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38367 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=16 2024-12-16T17:56:43,423 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 3609ad07831c,39733,1734371789085 2024-12-16T17:56:43,423 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=39733 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=17 2024-12-16T17:56:43,423 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-0 {event_type=RS_FLUSH_REGIONS, pid=17}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1734371794518.99318ad6c4e7b8782230d738424ff705. 2024-12-16T17:56:43,424 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-0 {event_type=RS_FLUSH_REGIONS, pid=17}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1734371794518.99318ad6c4e7b8782230d738424ff705. as already flushing 2024-12-16T17:56:43,424 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-0 {event_type=RS_FLUSH_REGIONS, pid=17}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1734371794518.99318ad6c4e7b8782230d738424ff705. 2024-12-16T17:56:43,424 ERROR [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-0 {event_type=RS_FLUSH_REGIONS, pid=17}] handler.RSProcedureHandler(58): pid=17 java.io.IOException: Unable to complete flush {ENCODED => 99318ad6c4e7b8782230d738424ff705, NAME => 'TestAcidGuarantees,,1734371794518.99318ad6c4e7b8782230d738424ff705.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-16T17:56:43,424 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-0 {event_type=RS_FLUSH_REGIONS, pid=17}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=17 java.io.IOException: Unable to complete flush {ENCODED => 99318ad6c4e7b8782230d738424ff705, NAME => 'TestAcidGuarantees,,1734371794518.99318ad6c4e7b8782230d738424ff705.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-16T17:56:43,425 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38367 {}] master.HMaster(4114): Remote procedure failed, pid=17 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 99318ad6c4e7b8782230d738424ff705, NAME => 'TestAcidGuarantees,,1734371794518.99318ad6c4e7b8782230d738424ff705.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 99318ad6c4e7b8782230d738424ff705, NAME => 'TestAcidGuarantees,,1734371794518.99318ad6c4e7b8782230d738424ff705.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-16T17:56:43,540 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=51.44 KB at sequenceid=240 (bloomFilter=true), to=hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/99318ad6c4e7b8782230d738424ff705/.tmp/B/edab2f3dc1a14829aed8187ebdc18bb8 2024-12-16T17:56:43,557 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/99318ad6c4e7b8782230d738424ff705/.tmp/C/cfbb7888409c4717b349db432a105d6e is 50, key is test_row_0/C:col10/1734371803062/Put/seqid=0 2024-12-16T17:56:43,567 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41817 is added to blk_1073741886_1062 (size=12151) 2024-12-16T17:56:43,568 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=51.44 KB at sequenceid=240 (bloomFilter=true), to=hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/99318ad6c4e7b8782230d738424ff705/.tmp/C/cfbb7888409c4717b349db432a105d6e 2024-12-16T17:56:43,578 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 3609ad07831c,39733,1734371789085 2024-12-16T17:56:43,579 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=39733 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=17 2024-12-16T17:56:43,579 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/99318ad6c4e7b8782230d738424ff705/.tmp/A/18639d61e1b94d1a96ffaa942f004997 as hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/99318ad6c4e7b8782230d738424ff705/A/18639d61e1b94d1a96ffaa942f004997 2024-12-16T17:56:43,579 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-1 {event_type=RS_FLUSH_REGIONS, pid=17}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1734371794518.99318ad6c4e7b8782230d738424ff705. 2024-12-16T17:56:43,579 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-1 {event_type=RS_FLUSH_REGIONS, pid=17}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1734371794518.99318ad6c4e7b8782230d738424ff705. as already flushing 2024-12-16T17:56:43,579 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-1 {event_type=RS_FLUSH_REGIONS, pid=17}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1734371794518.99318ad6c4e7b8782230d738424ff705. 2024-12-16T17:56:43,579 ERROR [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-1 {event_type=RS_FLUSH_REGIONS, pid=17}] handler.RSProcedureHandler(58): pid=17 java.io.IOException: Unable to complete flush {ENCODED => 99318ad6c4e7b8782230d738424ff705, NAME => 'TestAcidGuarantees,,1734371794518.99318ad6c4e7b8782230d738424ff705.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-16T17:56:43,580 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-1 {event_type=RS_FLUSH_REGIONS, pid=17}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=17 java.io.IOException: Unable to complete flush {ENCODED => 99318ad6c4e7b8782230d738424ff705, NAME => 'TestAcidGuarantees,,1734371794518.99318ad6c4e7b8782230d738424ff705.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-16T17:56:43,581 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38367 {}] master.HMaster(4114): Remote procedure failed, pid=17 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 99318ad6c4e7b8782230d738424ff705, NAME => 'TestAcidGuarantees,,1734371794518.99318ad6c4e7b8782230d738424ff705.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 99318ad6c4e7b8782230d738424ff705, NAME => 'TestAcidGuarantees,,1734371794518.99318ad6c4e7b8782230d738424ff705.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-16T17:56:43,601 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/99318ad6c4e7b8782230d738424ff705/A/18639d61e1b94d1a96ffaa942f004997, entries=250, sequenceid=240, filesize=16.5 K 2024-12-16T17:56:43,602 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/99318ad6c4e7b8782230d738424ff705/.tmp/B/edab2f3dc1a14829aed8187ebdc18bb8 as hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/99318ad6c4e7b8782230d738424ff705/B/edab2f3dc1a14829aed8187ebdc18bb8 2024-12-16T17:56:43,610 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/99318ad6c4e7b8782230d738424ff705/B/edab2f3dc1a14829aed8187ebdc18bb8, entries=150, sequenceid=240, filesize=11.9 K 2024-12-16T17:56:43,613 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/99318ad6c4e7b8782230d738424ff705/.tmp/C/cfbb7888409c4717b349db432a105d6e as hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/99318ad6c4e7b8782230d738424ff705/C/cfbb7888409c4717b349db432a105d6e 2024-12-16T17:56:43,625 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/99318ad6c4e7b8782230d738424ff705/C/cfbb7888409c4717b349db432a105d6e, entries=150, sequenceid=240, filesize=11.9 K 
2024-12-16T17:56:43,626 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~154.31 KB/158010, heapSize ~405 KB/414720, currentSize=53.67 KB/54960 for 99318ad6c4e7b8782230d738424ff705 in 563ms, sequenceid=240, compaction requested=true 2024-12-16T17:56:43,626 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 99318ad6c4e7b8782230d738424ff705: 2024-12-16T17:56:43,626 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 99318ad6c4e7b8782230d738424ff705:A, priority=-2147483648, current under compaction store size is 1 2024-12-16T17:56:43,626 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-16T17:56:43,626 DEBUG [RS:0;3609ad07831c:39733-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-16T17:56:43,627 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 99318ad6c4e7b8782230d738424ff705:B, priority=-2147483648, current under compaction store size is 2 2024-12-16T17:56:43,627 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-16T17:56:43,627 DEBUG [RS:0;3609ad07831c:39733-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-16T17:56:43,627 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 99318ad6c4e7b8782230d738424ff705:C, priority=-2147483648, current under compaction store size is 3 2024-12-16T17:56:43,627 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-16T17:56:43,628 DEBUG [RS:0;3609ad07831c:39733-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 34503 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-16T17:56:43,628 DEBUG [RS:0;3609ad07831c:39733-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 46453 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-16T17:56:43,628 DEBUG [RS:0;3609ad07831c:39733-shortCompactions-0 {}] regionserver.HStore(1540): 99318ad6c4e7b8782230d738424ff705/A is initiating minor compaction (all files) 2024-12-16T17:56:43,628 DEBUG [RS:0;3609ad07831c:39733-longCompactions-0 {}] regionserver.HStore(1540): 99318ad6c4e7b8782230d738424ff705/B is initiating minor compaction (all files) 2024-12-16T17:56:43,628 INFO [RS:0;3609ad07831c:39733-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 99318ad6c4e7b8782230d738424ff705/A in TestAcidGuarantees,,1734371794518.99318ad6c4e7b8782230d738424ff705. 2024-12-16T17:56:43,628 INFO [RS:0;3609ad07831c:39733-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 99318ad6c4e7b8782230d738424ff705/B in TestAcidGuarantees,,1734371794518.99318ad6c4e7b8782230d738424ff705. 
2024-12-16T17:56:43,628 INFO [RS:0;3609ad07831c:39733-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/99318ad6c4e7b8782230d738424ff705/B/e384afab16b34fd3b855efd42db768d8, hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/99318ad6c4e7b8782230d738424ff705/B/7f6b1ee7cace436b9ffa9007d42e850e, hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/99318ad6c4e7b8782230d738424ff705/B/edab2f3dc1a14829aed8187ebdc18bb8] into tmpdir=hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/99318ad6c4e7b8782230d738424ff705/.tmp, totalSize=33.7 K 2024-12-16T17:56:43,628 INFO [RS:0;3609ad07831c:39733-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/99318ad6c4e7b8782230d738424ff705/A/dd7976d569124dd7a7746eaa362becb7, hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/99318ad6c4e7b8782230d738424ff705/A/1b99178b095441679aa4d2c74989a155, hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/99318ad6c4e7b8782230d738424ff705/A/18639d61e1b94d1a96ffaa942f004997] into tmpdir=hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/99318ad6c4e7b8782230d738424ff705/.tmp, totalSize=45.4 K 2024-12-16T17:56:43,629 DEBUG [RS:0;3609ad07831c:39733-shortCompactions-0 {}] compactions.Compactor(224): Compacting dd7976d569124dd7a7746eaa362becb7, keycount=150, bloomtype=ROW, size=12.3 K, encoding=NONE, compression=NONE, seqNum=198, earliestPutTs=1734371801955 2024-12-16T17:56:43,629 DEBUG [RS:0;3609ad07831c:39733-longCompactions-0 {}] compactions.Compactor(224): Compacting e384afab16b34fd3b855efd42db768d8, keycount=150, bloomtype=ROW, size=12.3 K, encoding=NONE, compression=NONE, seqNum=198, earliestPutTs=1734371801955 2024-12-16T17:56:43,629 DEBUG [RS:0;3609ad07831c:39733-longCompactions-0 {}] compactions.Compactor(224): Compacting 7f6b1ee7cace436b9ffa9007d42e850e, keycount=100, bloomtype=ROW, size=9.5 K, encoding=NONE, compression=NONE, seqNum=214, earliestPutTs=1734371802588 2024-12-16T17:56:43,629 DEBUG [RS:0;3609ad07831c:39733-shortCompactions-0 {}] compactions.Compactor(224): Compacting 1b99178b095441679aa4d2c74989a155, keycount=250, bloomtype=ROW, size=16.5 K, encoding=NONE, compression=NONE, seqNum=214, earliestPutTs=1734371802587 2024-12-16T17:56:43,630 DEBUG [RS:0;3609ad07831c:39733-longCompactions-0 {}] compactions.Compactor(224): Compacting edab2f3dc1a14829aed8187ebdc18bb8, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=240, earliestPutTs=1734371802946 2024-12-16T17:56:43,630 DEBUG [RS:0;3609ad07831c:39733-shortCompactions-0 {}] compactions.Compactor(224): Compacting 18639d61e1b94d1a96ffaa942f004997, keycount=250, bloomtype=ROW, size=16.5 K, encoding=NONE, compression=NONE, seqNum=240, earliestPutTs=1734371802946 2024-12-16T17:56:43,641 INFO [RS:0;3609ad07831c:39733-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 99318ad6c4e7b8782230d738424ff705#A#compaction#48 average throughput is 6.55 MB/second, slept 0 time(s) and 
total slept time is 0 ms. 1 active operations remaining, total limit is 50.00 MB/second 2024-12-16T17:56:43,642 INFO [RS:0;3609ad07831c:39733-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 99318ad6c4e7b8782230d738424ff705#B#compaction#49 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-12-16T17:56:43,642 DEBUG [RS:0;3609ad07831c:39733-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/99318ad6c4e7b8782230d738424ff705/.tmp/A/10aadf957f554aab979bd3096f233e91 is 50, key is test_row_0/A:col10/1734371803062/Put/seqid=0 2024-12-16T17:56:43,643 DEBUG [RS:0;3609ad07831c:39733-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/99318ad6c4e7b8782230d738424ff705/.tmp/B/311e93ab16554161bc98b6d49e813c60 is 50, key is test_row_0/B:col10/1734371803062/Put/seqid=0 2024-12-16T17:56:43,658 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41817 is added to blk_1073741887_1063 (size=12697) 2024-12-16T17:56:43,672 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41817 is added to blk_1073741888_1064 (size=12697) 2024-12-16T17:56:43,688 DEBUG [RS:0;3609ad07831c:39733-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/99318ad6c4e7b8782230d738424ff705/.tmp/B/311e93ab16554161bc98b6d49e813c60 as hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/99318ad6c4e7b8782230d738424ff705/B/311e93ab16554161bc98b6d49e813c60 2024-12-16T17:56:43,696 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39733 {}] regionserver.HRegion(8581): Flush requested on 99318ad6c4e7b8782230d738424ff705 2024-12-16T17:56:43,696 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 99318ad6c4e7b8782230d738424ff705 3/3 column families, dataSize=67.09 KB heapSize=176.53 KB 2024-12-16T17:56:43,696 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 99318ad6c4e7b8782230d738424ff705, store=A 2024-12-16T17:56:43,696 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-16T17:56:43,697 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 99318ad6c4e7b8782230d738424ff705, store=B 2024-12-16T17:56:43,697 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-16T17:56:43,697 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 99318ad6c4e7b8782230d738424ff705, store=C 2024-12-16T17:56:43,697 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-16T17:56:43,701 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38367 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=16 2024-12-16T17:56:43,703 INFO [RS:0;3609ad07831c:39733-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 
(all) file(s) in 99318ad6c4e7b8782230d738424ff705/B of 99318ad6c4e7b8782230d738424ff705 into 311e93ab16554161bc98b6d49e813c60(size=12.4 K), total size for store is 12.4 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-12-16T17:56:43,703 DEBUG [RS:0;3609ad07831c:39733-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 99318ad6c4e7b8782230d738424ff705: 2024-12-16T17:56:43,703 INFO [RS:0;3609ad07831c:39733-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1734371794518.99318ad6c4e7b8782230d738424ff705., storeName=99318ad6c4e7b8782230d738424ff705/B, priority=13, startTime=1734371803627; duration=0sec 2024-12-16T17:56:43,703 DEBUG [RS:0;3609ad07831c:39733-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-16T17:56:43,703 DEBUG [RS:0;3609ad07831c:39733-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 99318ad6c4e7b8782230d738424ff705:B 2024-12-16T17:56:43,703 DEBUG [RS:0;3609ad07831c:39733-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-16T17:56:43,704 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/99318ad6c4e7b8782230d738424ff705/.tmp/A/eda0a2fdec314a52a2ede1db1dfa8e79 is 50, key is test_row_0/A:col10/1734371803694/Put/seqid=0 2024-12-16T17:56:43,707 DEBUG [RS:0;3609ad07831c:39733-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 34503 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-16T17:56:43,707 DEBUG [RS:0;3609ad07831c:39733-longCompactions-0 {}] regionserver.HStore(1540): 99318ad6c4e7b8782230d738424ff705/C is initiating minor compaction (all files) 2024-12-16T17:56:43,708 INFO [RS:0;3609ad07831c:39733-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 99318ad6c4e7b8782230d738424ff705/C in TestAcidGuarantees,,1734371794518.99318ad6c4e7b8782230d738424ff705. 
2024-12-16T17:56:43,708 INFO [RS:0;3609ad07831c:39733-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/99318ad6c4e7b8782230d738424ff705/C/d97c841578a540cf845fd7c40280017b, hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/99318ad6c4e7b8782230d738424ff705/C/2bcabae6955648f0a29e5ef3b9ef4555, hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/99318ad6c4e7b8782230d738424ff705/C/cfbb7888409c4717b349db432a105d6e] into tmpdir=hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/99318ad6c4e7b8782230d738424ff705/.tmp, totalSize=33.7 K 2024-12-16T17:56:43,710 DEBUG [RS:0;3609ad07831c:39733-longCompactions-0 {}] compactions.Compactor(224): Compacting d97c841578a540cf845fd7c40280017b, keycount=150, bloomtype=ROW, size=12.3 K, encoding=NONE, compression=NONE, seqNum=198, earliestPutTs=1734371801955 2024-12-16T17:56:43,710 DEBUG [RS:0;3609ad07831c:39733-longCompactions-0 {}] compactions.Compactor(224): Compacting 2bcabae6955648f0a29e5ef3b9ef4555, keycount=100, bloomtype=ROW, size=9.5 K, encoding=NONE, compression=NONE, seqNum=214, earliestPutTs=1734371802588 2024-12-16T17:56:43,711 DEBUG [RS:0;3609ad07831c:39733-longCompactions-0 {}] compactions.Compactor(224): Compacting cfbb7888409c4717b349db432a105d6e, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=240, earliestPutTs=1734371802946 2024-12-16T17:56:43,723 INFO [RS:0;3609ad07831c:39733-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 99318ad6c4e7b8782230d738424ff705#C#compaction#51 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-12-16T17:56:43,724 DEBUG [RS:0;3609ad07831c:39733-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/99318ad6c4e7b8782230d738424ff705/.tmp/C/69c32887809b456abb6b2c7d7f3c4903 is 50, key is test_row_0/C:col10/1734371803062/Put/seqid=0 2024-12-16T17:56:43,730 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41817 is added to blk_1073741889_1065 (size=12151) 2024-12-16T17:56:43,733 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 3609ad07831c,39733,1734371789085 2024-12-16T17:56:43,733 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=22.36 KB at sequenceid=253 (bloomFilter=true), to=hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/99318ad6c4e7b8782230d738424ff705/.tmp/A/eda0a2fdec314a52a2ede1db1dfa8e79 2024-12-16T17:56:43,734 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=39733 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=17 2024-12-16T17:56:43,734 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-2 {event_type=RS_FLUSH_REGIONS, pid=17}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1734371794518.99318ad6c4e7b8782230d738424ff705. 
2024-12-16T17:56:43,734 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41817 is added to blk_1073741890_1066 (size=12697) 2024-12-16T17:56:43,734 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-2 {event_type=RS_FLUSH_REGIONS, pid=17}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1734371794518.99318ad6c4e7b8782230d738424ff705. as already flushing 2024-12-16T17:56:43,734 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-2 {event_type=RS_FLUSH_REGIONS, pid=17}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1734371794518.99318ad6c4e7b8782230d738424ff705. 2024-12-16T17:56:43,734 ERROR [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-2 {event_type=RS_FLUSH_REGIONS, pid=17}] handler.RSProcedureHandler(58): pid=17 java.io.IOException: Unable to complete flush {ENCODED => 99318ad6c4e7b8782230d738424ff705, NAME => 'TestAcidGuarantees,,1734371794518.99318ad6c4e7b8782230d738424ff705.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-16T17:56:43,734 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-2 {event_type=RS_FLUSH_REGIONS, pid=17}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=17 java.io.IOException: Unable to complete flush {ENCODED => 99318ad6c4e7b8782230d738424ff705, NAME => 'TestAcidGuarantees,,1734371794518.99318ad6c4e7b8782230d738424ff705.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-16T17:56:43,735 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38367 {}] master.HMaster(4114): Remote procedure failed, pid=17 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 99318ad6c4e7b8782230d738424ff705, NAME => 'TestAcidGuarantees,,1734371794518.99318ad6c4e7b8782230d738424ff705.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 99318ad6c4e7b8782230d738424ff705, NAME => 'TestAcidGuarantees,,1734371794518.99318ad6c4e7b8782230d738424ff705.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-16T17:56:43,741 DEBUG [RS:0;3609ad07831c:39733-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/99318ad6c4e7b8782230d738424ff705/.tmp/C/69c32887809b456abb6b2c7d7f3c4903 as hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/99318ad6c4e7b8782230d738424ff705/C/69c32887809b456abb6b2c7d7f3c4903 2024-12-16T17:56:43,744 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/99318ad6c4e7b8782230d738424ff705/.tmp/B/ecd1dc900f95461fa2e7f9e963aa4a40 is 50, key is test_row_0/B:col10/1734371803694/Put/seqid=0 2024-12-16T17:56:43,755 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41817 is added to blk_1073741891_1067 (size=12151) 2024-12-16T17:56:43,774 INFO [RS:0;3609ad07831c:39733-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 99318ad6c4e7b8782230d738424ff705/C of 99318ad6c4e7b8782230d738424ff705 into 69c32887809b456abb6b2c7d7f3c4903(size=12.4 K), total size for store is 12.4 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-12-16T17:56:43,774 DEBUG [RS:0;3609ad07831c:39733-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 99318ad6c4e7b8782230d738424ff705: 2024-12-16T17:56:43,775 INFO [RS:0;3609ad07831c:39733-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1734371794518.99318ad6c4e7b8782230d738424ff705., storeName=99318ad6c4e7b8782230d738424ff705/C, priority=13, startTime=1734371803627; duration=0sec 2024-12-16T17:56:43,776 DEBUG [RS:0;3609ad07831c:39733-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-16T17:56:43,776 DEBUG [RS:0;3609ad07831c:39733-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 99318ad6c4e7b8782230d738424ff705:C 2024-12-16T17:56:43,791 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39733 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=99318ad6c4e7b8782230d738424ff705, server=3609ad07831c,39733,1734371789085 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-16T17:56:43,791 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39733 {}] ipc.CallRunner(138): callId: 104 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40322 deadline: 1734371863787, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=99318ad6c4e7b8782230d738424ff705, server=3609ad07831c,39733,1734371789085 2024-12-16T17:56:43,792 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39733 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=99318ad6c4e7b8782230d738424ff705, server=3609ad07831c,39733,1734371789085 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-16T17:56:43,792 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39733 {}] ipc.CallRunner(138): callId: 109 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40298 deadline: 1734371863787, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=99318ad6c4e7b8782230d738424ff705, server=3609ad07831c,39733,1734371789085 2024-12-16T17:56:43,792 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39733 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=99318ad6c4e7b8782230d738424ff705, server=3609ad07831c,39733,1734371789085 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-16T17:56:43,793 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39733 {}] ipc.CallRunner(138): callId: 106 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40296 deadline: 1734371863787, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=99318ad6c4e7b8782230d738424ff705, server=3609ad07831c,39733,1734371789085 2024-12-16T17:56:43,793 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39733 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=99318ad6c4e7b8782230d738424ff705, server=3609ad07831c,39733,1734371789085 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-16T17:56:43,793 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39733 {}] ipc.CallRunner(138): callId: 108 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40334 deadline: 1734371863789, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=99318ad6c4e7b8782230d738424ff705, server=3609ad07831c,39733,1734371789085 2024-12-16T17:56:43,794 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39733 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=99318ad6c4e7b8782230d738424ff705, server=3609ad07831c,39733,1734371789085 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-16T17:56:43,794 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39733 {}] ipc.CallRunner(138): callId: 109 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40284 deadline: 1734371863789, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=99318ad6c4e7b8782230d738424ff705, server=3609ad07831c,39733,1734371789085 2024-12-16T17:56:43,887 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 3609ad07831c,39733,1734371789085 2024-12-16T17:56:43,889 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=39733 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=17 2024-12-16T17:56:43,889 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-0 {event_type=RS_FLUSH_REGIONS, pid=17}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1734371794518.99318ad6c4e7b8782230d738424ff705. 2024-12-16T17:56:43,889 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-0 {event_type=RS_FLUSH_REGIONS, pid=17}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1734371794518.99318ad6c4e7b8782230d738424ff705. as already flushing 2024-12-16T17:56:43,889 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-0 {event_type=RS_FLUSH_REGIONS, pid=17}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1734371794518.99318ad6c4e7b8782230d738424ff705. 2024-12-16T17:56:43,889 ERROR [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-0 {event_type=RS_FLUSH_REGIONS, pid=17}] handler.RSProcedureHandler(58): pid=17 java.io.IOException: Unable to complete flush {ENCODED => 99318ad6c4e7b8782230d738424ff705, NAME => 'TestAcidGuarantees,,1734371794518.99318ad6c4e7b8782230d738424ff705.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-16T17:56:43,890 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-0 {event_type=RS_FLUSH_REGIONS, pid=17}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=17 java.io.IOException: Unable to complete flush {ENCODED => 99318ad6c4e7b8782230d738424ff705, NAME => 'TestAcidGuarantees,,1734371794518.99318ad6c4e7b8782230d738424ff705.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-16T17:56:43,890 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38367 {}] master.HMaster(4114): Remote procedure failed, pid=17 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 99318ad6c4e7b8782230d738424ff705, NAME => 'TestAcidGuarantees,,1734371794518.99318ad6c4e7b8782230d738424ff705.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 99318ad6c4e7b8782230d738424ff705, NAME => 'TestAcidGuarantees,,1734371794518.99318ad6c4e7b8782230d738424ff705.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-16T17:56:43,896 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39733 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=99318ad6c4e7b8782230d738424ff705, server=3609ad07831c,39733,1734371789085 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-16T17:56:43,896 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39733 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=99318ad6c4e7b8782230d738424ff705, server=3609ad07831c,39733,1734371789085 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-16T17:56:43,896 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39733 {}] ipc.CallRunner(138): callId: 106 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40322 deadline: 1734371863893, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=99318ad6c4e7b8782230d738424ff705, server=3609ad07831c,39733,1734371789085 2024-12-16T17:56:43,896 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39733 {}] ipc.CallRunner(138): callId: 111 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40298 deadline: 1734371863894, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=99318ad6c4e7b8782230d738424ff705, server=3609ad07831c,39733,1734371789085 2024-12-16T17:56:43,897 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39733 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=99318ad6c4e7b8782230d738424ff705, server=3609ad07831c,39733,1734371789085 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-16T17:56:43,897 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39733 {}] ipc.CallRunner(138): callId: 108 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40296 deadline: 1734371863894, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=99318ad6c4e7b8782230d738424ff705, server=3609ad07831c,39733,1734371789085 2024-12-16T17:56:43,897 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39733 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=99318ad6c4e7b8782230d738424ff705, server=3609ad07831c,39733,1734371789085 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-16T17:56:43,897 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39733 {}] ipc.CallRunner(138): callId: 110 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40334 deadline: 1734371863895, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=99318ad6c4e7b8782230d738424ff705, server=3609ad07831c,39733,1734371789085 2024-12-16T17:56:43,898 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39733 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=99318ad6c4e7b8782230d738424ff705, server=3609ad07831c,39733,1734371789085 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-16T17:56:43,898 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39733 {}] ipc.CallRunner(138): callId: 111 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40284 deadline: 1734371863896, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=99318ad6c4e7b8782230d738424ff705, server=3609ad07831c,39733,1734371789085 2024-12-16T17:56:44,043 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 3609ad07831c,39733,1734371789085 2024-12-16T17:56:44,043 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=39733 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=17 2024-12-16T17:56:44,043 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-1 {event_type=RS_FLUSH_REGIONS, pid=17}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1734371794518.99318ad6c4e7b8782230d738424ff705. 2024-12-16T17:56:44,044 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-1 {event_type=RS_FLUSH_REGIONS, pid=17}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1734371794518.99318ad6c4e7b8782230d738424ff705. as already flushing 2024-12-16T17:56:44,044 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-1 {event_type=RS_FLUSH_REGIONS, pid=17}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1734371794518.99318ad6c4e7b8782230d738424ff705. 2024-12-16T17:56:44,044 ERROR [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-1 {event_type=RS_FLUSH_REGIONS, pid=17}] handler.RSProcedureHandler(58): pid=17 java.io.IOException: Unable to complete flush {ENCODED => 99318ad6c4e7b8782230d738424ff705, NAME => 'TestAcidGuarantees,,1734371794518.99318ad6c4e7b8782230d738424ff705.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] 
at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-16T17:56:44,044 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-1 {event_type=RS_FLUSH_REGIONS, pid=17}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=17 java.io.IOException: Unable to complete flush {ENCODED => 99318ad6c4e7b8782230d738424ff705, NAME => 'TestAcidGuarantees,,1734371794518.99318ad6c4e7b8782230d738424ff705.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-16T17:56:44,045 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38367 {}] master.HMaster(4114): Remote procedure failed, pid=17 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 99318ad6c4e7b8782230d738424ff705, NAME => 'TestAcidGuarantees,,1734371794518.99318ad6c4e7b8782230d738424ff705.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 99318ad6c4e7b8782230d738424ff705, NAME => 'TestAcidGuarantees,,1734371794518.99318ad6c4e7b8782230d738424ff705.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-16T17:56:44,067 DEBUG [RS:0;3609ad07831c:39733-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/99318ad6c4e7b8782230d738424ff705/.tmp/A/10aadf957f554aab979bd3096f233e91 as hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/99318ad6c4e7b8782230d738424ff705/A/10aadf957f554aab979bd3096f233e91 2024-12-16T17:56:44,076 INFO [RS:0;3609ad07831c:39733-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 99318ad6c4e7b8782230d738424ff705/A of 99318ad6c4e7b8782230d738424ff705 into 10aadf957f554aab979bd3096f233e91(size=12.4 K), total size for store is 12.4 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-12-16T17:56:44,076 DEBUG [RS:0;3609ad07831c:39733-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 99318ad6c4e7b8782230d738424ff705: 2024-12-16T17:56:44,076 INFO [RS:0;3609ad07831c:39733-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1734371794518.99318ad6c4e7b8782230d738424ff705., storeName=99318ad6c4e7b8782230d738424ff705/A, priority=13, startTime=1734371803626; duration=0sec 2024-12-16T17:56:44,076 DEBUG [RS:0;3609ad07831c:39733-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-16T17:56:44,076 DEBUG [RS:0;3609ad07831c:39733-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 99318ad6c4e7b8782230d738424ff705:A 2024-12-16T17:56:44,098 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39733 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=99318ad6c4e7b8782230d738424ff705, server=3609ad07831c,39733,1734371789085 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-16T17:56:44,099 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39733 {}] ipc.CallRunner(138): callId: 113 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40298 deadline: 1734371864098, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=99318ad6c4e7b8782230d738424ff705, server=3609ad07831c,39733,1734371789085 2024-12-16T17:56:44,100 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39733 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=99318ad6c4e7b8782230d738424ff705, server=3609ad07831c,39733,1734371789085 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-16T17:56:44,101 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39733 {}] ipc.CallRunner(138): callId: 108 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40322 deadline: 1734371864099, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=99318ad6c4e7b8782230d738424ff705, server=3609ad07831c,39733,1734371789085 2024-12-16T17:56:44,102 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39733 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=99318ad6c4e7b8782230d738424ff705, server=3609ad07831c,39733,1734371789085 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-16T17:56:44,102 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39733 {}] ipc.CallRunner(138): callId: 113 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40284 deadline: 1734371864100, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=99318ad6c4e7b8782230d738424ff705, server=3609ad07831c,39733,1734371789085 2024-12-16T17:56:44,102 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39733 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=99318ad6c4e7b8782230d738424ff705, server=3609ad07831c,39733,1734371789085 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-16T17:56:44,102 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39733 {}] ipc.CallRunner(138): callId: 112 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40334 deadline: 1734371864100, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=99318ad6c4e7b8782230d738424ff705, server=3609ad07831c,39733,1734371789085 2024-12-16T17:56:44,103 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39733 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=99318ad6c4e7b8782230d738424ff705, server=3609ad07831c,39733,1734371789085 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-16T17:56:44,103 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39733 {}] ipc.CallRunner(138): callId: 110 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40296 deadline: 1734371864101, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=99318ad6c4e7b8782230d738424ff705, server=3609ad07831c,39733,1734371789085 2024-12-16T17:56:44,157 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=22.36 KB at sequenceid=253 (bloomFilter=true), to=hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/99318ad6c4e7b8782230d738424ff705/.tmp/B/ecd1dc900f95461fa2e7f9e963aa4a40 2024-12-16T17:56:44,170 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/99318ad6c4e7b8782230d738424ff705/.tmp/C/4f67f22f2ef24477a403be93dc85fc8a is 50, key is test_row_0/C:col10/1734371803694/Put/seqid=0 2024-12-16T17:56:44,195 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41817 is added to blk_1073741892_1068 (size=12151) 2024-12-16T17:56:44,197 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 3609ad07831c,39733,1734371789085 2024-12-16T17:56:44,198 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=39733 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=17 2024-12-16T17:56:44,198 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-2 {event_type=RS_FLUSH_REGIONS, pid=17}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1734371794518.99318ad6c4e7b8782230d738424ff705. 2024-12-16T17:56:44,198 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-2 {event_type=RS_FLUSH_REGIONS, pid=17}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1734371794518.99318ad6c4e7b8782230d738424ff705. 
as already flushing 2024-12-16T17:56:44,198 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-2 {event_type=RS_FLUSH_REGIONS, pid=17}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1734371794518.99318ad6c4e7b8782230d738424ff705. 2024-12-16T17:56:44,198 ERROR [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-2 {event_type=RS_FLUSH_REGIONS, pid=17}] handler.RSProcedureHandler(58): pid=17 java.io.IOException: Unable to complete flush {ENCODED => 99318ad6c4e7b8782230d738424ff705, NAME => 'TestAcidGuarantees,,1734371794518.99318ad6c4e7b8782230d738424ff705.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-16T17:56:44,198 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-2 {event_type=RS_FLUSH_REGIONS, pid=17}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=17 java.io.IOException: Unable to complete flush {ENCODED => 99318ad6c4e7b8782230d738424ff705, NAME => 'TestAcidGuarantees,,1734371794518.99318ad6c4e7b8782230d738424ff705.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-16T17:56:44,199 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=22.36 KB at sequenceid=253 (bloomFilter=true), to=hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/99318ad6c4e7b8782230d738424ff705/.tmp/C/4f67f22f2ef24477a403be93dc85fc8a 2024-12-16T17:56:44,199 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38367 {}] master.HMaster(4114): Remote procedure failed, pid=17 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 99318ad6c4e7b8782230d738424ff705, NAME => 'TestAcidGuarantees,,1734371794518.99318ad6c4e7b8782230d738424ff705.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 99318ad6c4e7b8782230d738424ff705, NAME => 'TestAcidGuarantees,,1734371794518.99318ad6c4e7b8782230d738424ff705.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-16T17:56:44,202 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38367 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=16 2024-12-16T17:56:44,210 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/99318ad6c4e7b8782230d738424ff705/.tmp/A/eda0a2fdec314a52a2ede1db1dfa8e79 as hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/99318ad6c4e7b8782230d738424ff705/A/eda0a2fdec314a52a2ede1db1dfa8e79 2024-12-16T17:56:44,217 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/99318ad6c4e7b8782230d738424ff705/A/eda0a2fdec314a52a2ede1db1dfa8e79, entries=150, sequenceid=253, filesize=11.9 K 2024-12-16T17:56:44,218 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/99318ad6c4e7b8782230d738424ff705/.tmp/B/ecd1dc900f95461fa2e7f9e963aa4a40 as hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/99318ad6c4e7b8782230d738424ff705/B/ecd1dc900f95461fa2e7f9e963aa4a40 2024-12-16T17:56:44,227 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/99318ad6c4e7b8782230d738424ff705/B/ecd1dc900f95461fa2e7f9e963aa4a40, entries=150, sequenceid=253, filesize=11.9 K 2024-12-16T17:56:44,229 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/99318ad6c4e7b8782230d738424ff705/.tmp/C/4f67f22f2ef24477a403be93dc85fc8a as hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/99318ad6c4e7b8782230d738424ff705/C/4f67f22f2ef24477a403be93dc85fc8a 2024-12-16T17:56:44,237 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/99318ad6c4e7b8782230d738424ff705/C/4f67f22f2ef24477a403be93dc85fc8a, entries=150, sequenceid=253, filesize=11.9 K 2024-12-16T17:56:44,239 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~67.09 KB/68700, heapSize ~176.48 KB/180720, currentSize=134.18 KB/137400 for 99318ad6c4e7b8782230d738424ff705 in 543ms, sequenceid=253, compaction requested=false 2024-12-16T17:56:44,239 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 99318ad6c4e7b8782230d738424ff705: 2024-12-16T17:56:44,351 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 3609ad07831c,39733,1734371789085 2024-12-16T17:56:44,353 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=39733 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=17 2024-12-16T17:56:44,353 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-0 {event_type=RS_FLUSH_REGIONS, pid=17}] regionserver.FlushRegionCallable(51): Starting region 
operation on TestAcidGuarantees,,1734371794518.99318ad6c4e7b8782230d738424ff705. 2024-12-16T17:56:44,353 INFO [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-0 {event_type=RS_FLUSH_REGIONS, pid=17}] regionserver.HRegion(2837): Flushing 99318ad6c4e7b8782230d738424ff705 3/3 column families, dataSize=134.18 KB heapSize=352.31 KB 2024-12-16T17:56:44,356 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-0 {event_type=RS_FLUSH_REGIONS, pid=17}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 99318ad6c4e7b8782230d738424ff705, store=A 2024-12-16T17:56:44,356 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-0 {event_type=RS_FLUSH_REGIONS, pid=17}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-16T17:56:44,356 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-0 {event_type=RS_FLUSH_REGIONS, pid=17}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 99318ad6c4e7b8782230d738424ff705, store=B 2024-12-16T17:56:44,356 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-0 {event_type=RS_FLUSH_REGIONS, pid=17}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-16T17:56:44,357 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-0 {event_type=RS_FLUSH_REGIONS, pid=17}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 99318ad6c4e7b8782230d738424ff705, store=C 2024-12-16T17:56:44,357 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-0 {event_type=RS_FLUSH_REGIONS, pid=17}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-16T17:56:44,366 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-0 {event_type=RS_FLUSH_REGIONS, pid=17}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/99318ad6c4e7b8782230d738424ff705/.tmp/A/f1e3608f5a44478f85e6297ea90bec0c is 50, key is test_row_0/A:col10/1734371803786/Put/seqid=0 2024-12-16T17:56:44,373 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41817 is added to blk_1073741893_1069 (size=12301) 2024-12-16T17:56:44,376 INFO [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-0 {event_type=RS_FLUSH_REGIONS, pid=17}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=44.73 KB at sequenceid=279 (bloomFilter=true), to=hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/99318ad6c4e7b8782230d738424ff705/.tmp/A/f1e3608f5a44478f85e6297ea90bec0c 2024-12-16T17:56:44,387 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-0 {event_type=RS_FLUSH_REGIONS, pid=17}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/99318ad6c4e7b8782230d738424ff705/.tmp/B/f6470d1101c54d378c62bdd31cf29f9f is 50, key is test_row_0/B:col10/1734371803786/Put/seqid=0 2024-12-16T17:56:44,403 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39733 {}] regionserver.HRegion(8581): Flush requested on 99318ad6c4e7b8782230d738424ff705 2024-12-16T17:56:44,403 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1734371794518.99318ad6c4e7b8782230d738424ff705. 
as already flushing 2024-12-16T17:56:44,410 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41817 is added to blk_1073741894_1070 (size=12301) 2024-12-16T17:56:44,416 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39733 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=99318ad6c4e7b8782230d738424ff705, server=3609ad07831c,39733,1734371789085 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-16T17:56:44,416 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39733 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=99318ad6c4e7b8782230d738424ff705, server=3609ad07831c,39733,1734371789085 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-16T17:56:44,416 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39733 {}] ipc.CallRunner(138): callId: 118 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40298 deadline: 1734371864412, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=99318ad6c4e7b8782230d738424ff705, server=3609ad07831c,39733,1734371789085 2024-12-16T17:56:44,416 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39733 {}] ipc.CallRunner(138): callId: 112 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40322 deadline: 1734371864411, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=99318ad6c4e7b8782230d738424ff705, server=3609ad07831c,39733,1734371789085 2024-12-16T17:56:44,417 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39733 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=99318ad6c4e7b8782230d738424ff705, server=3609ad07831c,39733,1734371789085 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-16T17:56:44,418 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39733 {}] ipc.CallRunner(138): callId: 114 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40296 deadline: 1734371864415, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=99318ad6c4e7b8782230d738424ff705, server=3609ad07831c,39733,1734371789085 2024-12-16T17:56:44,419 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39733 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=99318ad6c4e7b8782230d738424ff705, server=3609ad07831c,39733,1734371789085 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-16T17:56:44,419 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39733 {}] ipc.CallRunner(138): callId: 116 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40334 deadline: 1734371864416, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=99318ad6c4e7b8782230d738424ff705, server=3609ad07831c,39733,1734371789085 2024-12-16T17:56:44,419 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39733 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=99318ad6c4e7b8782230d738424ff705, server=3609ad07831c,39733,1734371789085 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-16T17:56:44,420 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39733 {}] ipc.CallRunner(138): callId: 117 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40284 deadline: 1734371864417, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=99318ad6c4e7b8782230d738424ff705, server=3609ad07831c,39733,1734371789085 2024-12-16T17:56:44,518 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39733 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=99318ad6c4e7b8782230d738424ff705, server=3609ad07831c,39733,1734371789085 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-16T17:56:44,519 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39733 {}] ipc.CallRunner(138): callId: 120 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40298 deadline: 1734371864518, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=99318ad6c4e7b8782230d738424ff705, server=3609ad07831c,39733,1734371789085 2024-12-16T17:56:44,522 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39733 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=99318ad6c4e7b8782230d738424ff705, server=3609ad07831c,39733,1734371789085 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-16T17:56:44,523 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39733 {}] ipc.CallRunner(138): callId: 114 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40322 deadline: 1734371864520, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=99318ad6c4e7b8782230d738424ff705, server=3609ad07831c,39733,1734371789085 2024-12-16T17:56:44,523 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39733 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=99318ad6c4e7b8782230d738424ff705, server=3609ad07831c,39733,1734371789085 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-16T17:56:44,523 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39733 {}] ipc.CallRunner(138): callId: 116 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40296 deadline: 1734371864520, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=99318ad6c4e7b8782230d738424ff705, server=3609ad07831c,39733,1734371789085 2024-12-16T17:56:44,524 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39733 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=99318ad6c4e7b8782230d738424ff705, server=3609ad07831c,39733,1734371789085 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-16T17:56:44,524 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39733 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=99318ad6c4e7b8782230d738424ff705, server=3609ad07831c,39733,1734371789085 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-16T17:56:44,524 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39733 {}] ipc.CallRunner(138): callId: 118 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40334 deadline: 1734371864520, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=99318ad6c4e7b8782230d738424ff705, server=3609ad07831c,39733,1734371789085 2024-12-16T17:56:44,524 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39733 {}] ipc.CallRunner(138): callId: 119 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40284 deadline: 1734371864521, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=99318ad6c4e7b8782230d738424ff705, server=3609ad07831c,39733,1734371789085 2024-12-16T17:56:44,722 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39733 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=99318ad6c4e7b8782230d738424ff705, server=3609ad07831c,39733,1734371789085 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-16T17:56:44,723 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39733 {}] ipc.CallRunner(138): callId: 122 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40298 deadline: 1734371864720, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=99318ad6c4e7b8782230d738424ff705, server=3609ad07831c,39733,1734371789085 2024-12-16T17:56:44,726 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39733 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=99318ad6c4e7b8782230d738424ff705, server=3609ad07831c,39733,1734371789085 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-16T17:56:44,726 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39733 {}] ipc.CallRunner(138): callId: 120 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40334 deadline: 1734371864725, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=99318ad6c4e7b8782230d738424ff705, server=3609ad07831c,39733,1734371789085 2024-12-16T17:56:44,727 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39733 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=99318ad6c4e7b8782230d738424ff705, server=3609ad07831c,39733,1734371789085 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-16T17:56:44,727 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39733 {}] ipc.CallRunner(138): callId: 118 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40296 deadline: 1734371864725, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=99318ad6c4e7b8782230d738424ff705, server=3609ad07831c,39733,1734371789085 2024-12-16T17:56:44,727 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39733 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=99318ad6c4e7b8782230d738424ff705, server=3609ad07831c,39733,1734371789085 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-16T17:56:44,728 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39733 {}] ipc.CallRunner(138): callId: 121 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40284 deadline: 1734371864726, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=99318ad6c4e7b8782230d738424ff705, server=3609ad07831c,39733,1734371789085 2024-12-16T17:56:44,729 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39733 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=99318ad6c4e7b8782230d738424ff705, server=3609ad07831c,39733,1734371789085 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-16T17:56:44,729 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39733 {}] ipc.CallRunner(138): callId: 116 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40322 deadline: 1734371864727, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=99318ad6c4e7b8782230d738424ff705, server=3609ad07831c,39733,1734371789085 2024-12-16T17:56:44,811 INFO [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-0 {event_type=RS_FLUSH_REGIONS, pid=17}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=44.73 KB at sequenceid=279 (bloomFilter=true), to=hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/99318ad6c4e7b8782230d738424ff705/.tmp/B/f6470d1101c54d378c62bdd31cf29f9f 2024-12-16T17:56:44,823 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-0 {event_type=RS_FLUSH_REGIONS, pid=17}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/99318ad6c4e7b8782230d738424ff705/.tmp/C/6439325f4caf44969f9c3f2f78fdafb3 is 50, key is test_row_0/C:col10/1734371803786/Put/seqid=0 2024-12-16T17:56:44,838 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41817 is added to blk_1073741895_1071 (size=12301) 2024-12-16T17:56:45,025 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39733 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=99318ad6c4e7b8782230d738424ff705, server=3609ad07831c,39733,1734371789085 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-16T17:56:45,026 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39733 {}] ipc.CallRunner(138): callId: 124 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40298 deadline: 1734371865025, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=99318ad6c4e7b8782230d738424ff705, server=3609ad07831c,39733,1734371789085 2024-12-16T17:56:45,030 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39733 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=99318ad6c4e7b8782230d738424ff705, server=3609ad07831c,39733,1734371789085 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-16T17:56:45,030 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39733 {}] ipc.CallRunner(138): callId: 123 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40284 deadline: 1734371865029, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=99318ad6c4e7b8782230d738424ff705, server=3609ad07831c,39733,1734371789085 2024-12-16T17:56:45,031 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39733 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=99318ad6c4e7b8782230d738424ff705, server=3609ad07831c,39733,1734371789085 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-16T17:56:45,031 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39733 {}] ipc.CallRunner(138): callId: 122 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40334 deadline: 1734371865029, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=99318ad6c4e7b8782230d738424ff705, server=3609ad07831c,39733,1734371789085 2024-12-16T17:56:45,033 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39733 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=99318ad6c4e7b8782230d738424ff705, server=3609ad07831c,39733,1734371789085 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-16T17:56:45,034 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39733 {}] ipc.CallRunner(138): callId: 120 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40296 deadline: 1734371865031, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=99318ad6c4e7b8782230d738424ff705, server=3609ad07831c,39733,1734371789085 2024-12-16T17:56:45,034 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39733 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=99318ad6c4e7b8782230d738424ff705, server=3609ad07831c,39733,1734371789085 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-16T17:56:45,034 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39733 {}] ipc.CallRunner(138): callId: 118 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40322 deadline: 1734371865032, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=99318ad6c4e7b8782230d738424ff705, server=3609ad07831c,39733,1734371789085 2024-12-16T17:56:45,204 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38367 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=16 2024-12-16T17:56:45,239 INFO [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-0 {event_type=RS_FLUSH_REGIONS, pid=17}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=44.73 KB at sequenceid=279 (bloomFilter=true), to=hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/99318ad6c4e7b8782230d738424ff705/.tmp/C/6439325f4caf44969f9c3f2f78fdafb3 2024-12-16T17:56:45,244 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-0 {event_type=RS_FLUSH_REGIONS, pid=17}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/99318ad6c4e7b8782230d738424ff705/.tmp/A/f1e3608f5a44478f85e6297ea90bec0c as hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/99318ad6c4e7b8782230d738424ff705/A/f1e3608f5a44478f85e6297ea90bec0c 2024-12-16T17:56:45,252 INFO [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-0 {event_type=RS_FLUSH_REGIONS, pid=17}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/99318ad6c4e7b8782230d738424ff705/A/f1e3608f5a44478f85e6297ea90bec0c, entries=150, sequenceid=279, filesize=12.0 K 2024-12-16T17:56:45,253 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-0 {event_type=RS_FLUSH_REGIONS, pid=17}] regionserver.HRegionFileSystem(442): Committing 
hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/99318ad6c4e7b8782230d738424ff705/.tmp/B/f6470d1101c54d378c62bdd31cf29f9f as hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/99318ad6c4e7b8782230d738424ff705/B/f6470d1101c54d378c62bdd31cf29f9f 2024-12-16T17:56:45,262 INFO [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-0 {event_type=RS_FLUSH_REGIONS, pid=17}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/99318ad6c4e7b8782230d738424ff705/B/f6470d1101c54d378c62bdd31cf29f9f, entries=150, sequenceid=279, filesize=12.0 K 2024-12-16T17:56:45,264 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-0 {event_type=RS_FLUSH_REGIONS, pid=17}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/99318ad6c4e7b8782230d738424ff705/.tmp/C/6439325f4caf44969f9c3f2f78fdafb3 as hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/99318ad6c4e7b8782230d738424ff705/C/6439325f4caf44969f9c3f2f78fdafb3 2024-12-16T17:56:45,272 INFO [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-0 {event_type=RS_FLUSH_REGIONS, pid=17}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/99318ad6c4e7b8782230d738424ff705/C/6439325f4caf44969f9c3f2f78fdafb3, entries=150, sequenceid=279, filesize=12.0 K 2024-12-16T17:56:45,274 INFO [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-0 {event_type=RS_FLUSH_REGIONS, pid=17}] regionserver.HRegion(3040): Finished flush of dataSize ~134.18 KB/137400, heapSize ~352.27 KB/360720, currentSize=73.80 KB/75570 for 99318ad6c4e7b8782230d738424ff705 in 921ms, sequenceid=279, compaction requested=true 2024-12-16T17:56:45,274 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-0 {event_type=RS_FLUSH_REGIONS, pid=17}] regionserver.HRegion(2538): Flush status journal for 99318ad6c4e7b8782230d738424ff705: 2024-12-16T17:56:45,274 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-0 {event_type=RS_FLUSH_REGIONS, pid=17}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1734371794518.99318ad6c4e7b8782230d738424ff705. 
2024-12-16T17:56:45,274 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-0 {event_type=RS_FLUSH_REGIONS, pid=17}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=17 2024-12-16T17:56:45,275 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38367 {}] master.HMaster(4106): Remote procedure done, pid=17 2024-12-16T17:56:45,278 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=17, resume processing ppid=16 2024-12-16T17:56:45,279 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=17, ppid=16, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 2.1760 sec 2024-12-16T17:56:45,281 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=16, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=16, table=TestAcidGuarantees in 2.1860 sec 2024-12-16T17:56:45,534 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39733 {}] regionserver.HRegion(8581): Flush requested on 99318ad6c4e7b8782230d738424ff705 2024-12-16T17:56:45,534 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 99318ad6c4e7b8782230d738424ff705 3/3 column families, dataSize=80.51 KB heapSize=211.69 KB 2024-12-16T17:56:45,534 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 99318ad6c4e7b8782230d738424ff705, store=A 2024-12-16T17:56:45,535 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-16T17:56:45,535 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 99318ad6c4e7b8782230d738424ff705, store=B 2024-12-16T17:56:45,535 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-16T17:56:45,535 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 99318ad6c4e7b8782230d738424ff705, store=C 2024-12-16T17:56:45,535 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-16T17:56:45,541 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/99318ad6c4e7b8782230d738424ff705/.tmp/A/98c10f93cc0e4e018d39cd97f9db94ee is 50, key is test_row_0/A:col10/1734371804414/Put/seqid=0 2024-12-16T17:56:45,548 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41817 is added to blk_1073741896_1072 (size=14741) 2024-12-16T17:56:45,568 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39733 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=99318ad6c4e7b8782230d738424ff705, server=3609ad07831c,39733,1734371789085 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-16T17:56:45,569 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39733 {}] ipc.CallRunner(138): callId: 125 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40296 deadline: 1734371865560, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=99318ad6c4e7b8782230d738424ff705, server=3609ad07831c,39733,1734371789085 2024-12-16T17:56:45,569 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39733 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=99318ad6c4e7b8782230d738424ff705, server=3609ad07831c,39733,1734371789085 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-16T17:56:45,569 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39733 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=99318ad6c4e7b8782230d738424ff705, server=3609ad07831c,39733,1734371789085 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-16T17:56:45,569 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39733 {}] ipc.CallRunner(138): callId: 131 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40298 deadline: 1734371865561, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=99318ad6c4e7b8782230d738424ff705, server=3609ad07831c,39733,1734371789085 2024-12-16T17:56:45,569 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39733 {}] ipc.CallRunner(138): callId: 129 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40284 deadline: 1734371865564, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=99318ad6c4e7b8782230d738424ff705, server=3609ad07831c,39733,1734371789085 2024-12-16T17:56:45,573 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39733 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=99318ad6c4e7b8782230d738424ff705, server=3609ad07831c,39733,1734371789085 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-16T17:56:45,573 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39733 {}] ipc.CallRunner(138): callId: 124 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40322 deadline: 1734371865569, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=99318ad6c4e7b8782230d738424ff705, server=3609ad07831c,39733,1734371789085 2024-12-16T17:56:45,573 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39733 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=99318ad6c4e7b8782230d738424ff705, server=3609ad07831c,39733,1734371789085 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-16T17:56:45,574 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39733 {}] ipc.CallRunner(138): callId: 128 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40334 deadline: 1734371865569, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=99318ad6c4e7b8782230d738424ff705, server=3609ad07831c,39733,1734371789085 2024-12-16T17:56:45,670 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39733 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=99318ad6c4e7b8782230d738424ff705, server=3609ad07831c,39733,1734371789085 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-16T17:56:45,671 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39733 {}] ipc.CallRunner(138): callId: 127 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40296 deadline: 1734371865670, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=99318ad6c4e7b8782230d738424ff705, server=3609ad07831c,39733,1734371789085 2024-12-16T17:56:45,671 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39733 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=99318ad6c4e7b8782230d738424ff705, server=3609ad07831c,39733,1734371789085 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-16T17:56:45,671 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39733 {}] ipc.CallRunner(138): callId: 131 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40284 deadline: 1734371865670, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=99318ad6c4e7b8782230d738424ff705, server=3609ad07831c,39733,1734371789085 2024-12-16T17:56:45,671 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39733 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=99318ad6c4e7b8782230d738424ff705, server=3609ad07831c,39733,1734371789085 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-16T17:56:45,672 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39733 {}] ipc.CallRunner(138): callId: 133 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40298 deadline: 1734371865670, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=99318ad6c4e7b8782230d738424ff705, server=3609ad07831c,39733,1734371789085 2024-12-16T17:56:45,677 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39733 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=99318ad6c4e7b8782230d738424ff705, server=3609ad07831c,39733,1734371789085 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-16T17:56:45,678 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39733 {}] ipc.CallRunner(138): callId: 126 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40322 deadline: 1734371865674, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=99318ad6c4e7b8782230d738424ff705, server=3609ad07831c,39733,1734371789085 2024-12-16T17:56:45,678 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39733 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=99318ad6c4e7b8782230d738424ff705, server=3609ad07831c,39733,1734371789085 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-16T17:56:45,678 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39733 {}] ipc.CallRunner(138): callId: 130 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40334 deadline: 1734371865675, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=99318ad6c4e7b8782230d738424ff705, server=3609ad07831c,39733,1734371789085 2024-12-16T17:56:45,873 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39733 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=99318ad6c4e7b8782230d738424ff705, server=3609ad07831c,39733,1734371789085 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-16T17:56:45,873 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39733 {}] ipc.CallRunner(138): callId: 133 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40284 deadline: 1734371865872, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=99318ad6c4e7b8782230d738424ff705, server=3609ad07831c,39733,1734371789085 2024-12-16T17:56:45,873 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39733 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=99318ad6c4e7b8782230d738424ff705, server=3609ad07831c,39733,1734371789085 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-16T17:56:45,873 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39733 {}] ipc.CallRunner(138): callId: 129 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40296 deadline: 1734371865873, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=99318ad6c4e7b8782230d738424ff705, server=3609ad07831c,39733,1734371789085 2024-12-16T17:56:45,874 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39733 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=99318ad6c4e7b8782230d738424ff705, server=3609ad07831c,39733,1734371789085 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-16T17:56:45,874 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39733 {}] ipc.CallRunner(138): callId: 135 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40298 deadline: 1734371865873, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=99318ad6c4e7b8782230d738424ff705, server=3609ad07831c,39733,1734371789085 2024-12-16T17:56:45,880 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39733 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=99318ad6c4e7b8782230d738424ff705, server=3609ad07831c,39733,1734371789085 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-16T17:56:45,881 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39733 {}] ipc.CallRunner(138): callId: 128 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40322 deadline: 1734371865880, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=99318ad6c4e7b8782230d738424ff705, server=3609ad07831c,39733,1734371789085 2024-12-16T17:56:45,881 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39733 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=99318ad6c4e7b8782230d738424ff705, server=3609ad07831c,39733,1734371789085 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-16T17:56:45,881 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39733 {}] ipc.CallRunner(138): callId: 132 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40334 deadline: 1734371865880, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=99318ad6c4e7b8782230d738424ff705, server=3609ad07831c,39733,1734371789085 2024-12-16T17:56:45,949 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=26.84 KB at sequenceid=294 (bloomFilter=true), to=hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/99318ad6c4e7b8782230d738424ff705/.tmp/A/98c10f93cc0e4e018d39cd97f9db94ee 2024-12-16T17:56:45,961 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/99318ad6c4e7b8782230d738424ff705/.tmp/B/36e315ee0cbe4cf58e10817126dcd534 is 50, key is test_row_0/B:col10/1734371804414/Put/seqid=0 2024-12-16T17:56:45,979 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41817 is added to blk_1073741897_1073 (size=12301) 2024-12-16T17:56:46,178 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39733 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=99318ad6c4e7b8782230d738424ff705, server=3609ad07831c,39733,1734371789085 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-16T17:56:46,179 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39733 {}] ipc.CallRunner(138): callId: 135 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40284 deadline: 1734371866176, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=99318ad6c4e7b8782230d738424ff705, server=3609ad07831c,39733,1734371789085 2024-12-16T17:56:46,179 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39733 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=99318ad6c4e7b8782230d738424ff705, server=3609ad07831c,39733,1734371789085 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-16T17:56:46,180 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39733 {}] ipc.CallRunner(138): callId: 131 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40296 deadline: 1734371866177, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=99318ad6c4e7b8782230d738424ff705, server=3609ad07831c,39733,1734371789085 2024-12-16T17:56:46,180 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39733 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=99318ad6c4e7b8782230d738424ff705, server=3609ad07831c,39733,1734371789085 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-16T17:56:46,180 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39733 {}] ipc.CallRunner(138): callId: 137 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40298 deadline: 1734371866178, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=99318ad6c4e7b8782230d738424ff705, server=3609ad07831c,39733,1734371789085 2024-12-16T17:56:46,183 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39733 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=99318ad6c4e7b8782230d738424ff705, server=3609ad07831c,39733,1734371789085 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-16T17:56:46,183 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39733 {}] ipc.CallRunner(138): callId: 130 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40322 deadline: 1734371866183, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=99318ad6c4e7b8782230d738424ff705, server=3609ad07831c,39733,1734371789085 2024-12-16T17:56:46,188 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39733 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=99318ad6c4e7b8782230d738424ff705, server=3609ad07831c,39733,1734371789085 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-16T17:56:46,188 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39733 {}] ipc.CallRunner(138): callId: 134 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40334 deadline: 1734371866184, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=99318ad6c4e7b8782230d738424ff705, server=3609ad07831c,39733,1734371789085 2024-12-16T17:56:46,381 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=26.84 KB at sequenceid=294 (bloomFilter=true), to=hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/99318ad6c4e7b8782230d738424ff705/.tmp/B/36e315ee0cbe4cf58e10817126dcd534 2024-12-16T17:56:46,396 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/99318ad6c4e7b8782230d738424ff705/.tmp/C/c70d95ce77ea4ef0b0a494b1a57eca66 is 50, key is test_row_0/C:col10/1734371804414/Put/seqid=0 2024-12-16T17:56:46,427 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41817 is added to blk_1073741898_1074 (size=12301) 2024-12-16T17:56:46,428 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=26.84 KB at sequenceid=294 (bloomFilter=true), to=hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/99318ad6c4e7b8782230d738424ff705/.tmp/C/c70d95ce77ea4ef0b0a494b1a57eca66 2024-12-16T17:56:46,440 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/99318ad6c4e7b8782230d738424ff705/.tmp/A/98c10f93cc0e4e018d39cd97f9db94ee as hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/99318ad6c4e7b8782230d738424ff705/A/98c10f93cc0e4e018d39cd97f9db94ee 2024-12-16T17:56:46,453 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added 
hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/99318ad6c4e7b8782230d738424ff705/A/98c10f93cc0e4e018d39cd97f9db94ee, entries=200, sequenceid=294, filesize=14.4 K 2024-12-16T17:56:46,455 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/99318ad6c4e7b8782230d738424ff705/.tmp/B/36e315ee0cbe4cf58e10817126dcd534 as hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/99318ad6c4e7b8782230d738424ff705/B/36e315ee0cbe4cf58e10817126dcd534 2024-12-16T17:56:46,463 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/99318ad6c4e7b8782230d738424ff705/B/36e315ee0cbe4cf58e10817126dcd534, entries=150, sequenceid=294, filesize=12.0 K 2024-12-16T17:56:46,465 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/99318ad6c4e7b8782230d738424ff705/.tmp/C/c70d95ce77ea4ef0b0a494b1a57eca66 as hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/99318ad6c4e7b8782230d738424ff705/C/c70d95ce77ea4ef0b0a494b1a57eca66 2024-12-16T17:56:46,477 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/99318ad6c4e7b8782230d738424ff705/C/c70d95ce77ea4ef0b0a494b1a57eca66, entries=150, sequenceid=294, filesize=12.0 K 2024-12-16T17:56:46,479 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~80.51 KB/82440, heapSize ~211.64 KB/216720, currentSize=127.47 KB/130530 for 99318ad6c4e7b8782230d738424ff705 in 944ms, sequenceid=294, compaction requested=true 2024-12-16T17:56:46,479 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 99318ad6c4e7b8782230d738424ff705: 2024-12-16T17:56:46,479 DEBUG [RS:0;3609ad07831c:39733-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 4 store files, 0 compacting, 4 eligible, 16 blocking 2024-12-16T17:56:46,480 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 99318ad6c4e7b8782230d738424ff705:A, priority=-2147483648, current under compaction store size is 1 2024-12-16T17:56:46,480 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-16T17:56:46,481 DEBUG [RS:0;3609ad07831c:39733-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 4 store files, 0 compacting, 4 eligible, 16 blocking 2024-12-16T17:56:46,481 DEBUG [RS:0;3609ad07831c:39733-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 4 files of size 51890 starting at candidate #0 after considering 3 permutations with 3 in ratio 2024-12-16T17:56:46,481 DEBUG [RS:0;3609ad07831c:39733-shortCompactions-0 {}] regionserver.HStore(1540): 99318ad6c4e7b8782230d738424ff705/A is initiating minor compaction (all files) 2024-12-16T17:56:46,482 INFO 
[RS:0;3609ad07831c:39733-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 99318ad6c4e7b8782230d738424ff705/A in TestAcidGuarantees,,1734371794518.99318ad6c4e7b8782230d738424ff705. 2024-12-16T17:56:46,482 INFO [RS:0;3609ad07831c:39733-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/99318ad6c4e7b8782230d738424ff705/A/10aadf957f554aab979bd3096f233e91, hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/99318ad6c4e7b8782230d738424ff705/A/eda0a2fdec314a52a2ede1db1dfa8e79, hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/99318ad6c4e7b8782230d738424ff705/A/f1e3608f5a44478f85e6297ea90bec0c, hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/99318ad6c4e7b8782230d738424ff705/A/98c10f93cc0e4e018d39cd97f9db94ee] into tmpdir=hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/99318ad6c4e7b8782230d738424ff705/.tmp, totalSize=50.7 K 2024-12-16T17:56:46,482 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 99318ad6c4e7b8782230d738424ff705:B, priority=-2147483648, current under compaction store size is 2 2024-12-16T17:56:46,482 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-16T17:56:46,482 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 99318ad6c4e7b8782230d738424ff705:C, priority=-2147483648, current under compaction store size is 3 2024-12-16T17:56:46,482 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-16T17:56:46,483 DEBUG [RS:0;3609ad07831c:39733-shortCompactions-0 {}] compactions.Compactor(224): Compacting 10aadf957f554aab979bd3096f233e91, keycount=150, bloomtype=ROW, size=12.4 K, encoding=NONE, compression=NONE, seqNum=240, earliestPutTs=1734371802946 2024-12-16T17:56:46,484 DEBUG [RS:0;3609ad07831c:39733-shortCompactions-0 {}] compactions.Compactor(224): Compacting eda0a2fdec314a52a2ede1db1dfa8e79, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=253, earliestPutTs=1734371803077 2024-12-16T17:56:46,485 DEBUG [RS:0;3609ad07831c:39733-shortCompactions-0 {}] compactions.Compactor(224): Compacting f1e3608f5a44478f85e6297ea90bec0c, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=279, earliestPutTs=1734371803783 2024-12-16T17:56:46,485 DEBUG [RS:0;3609ad07831c:39733-shortCompactions-0 {}] compactions.Compactor(224): Compacting 98c10f93cc0e4e018d39cd97f9db94ee, keycount=200, bloomtype=ROW, size=14.4 K, encoding=NONE, compression=NONE, seqNum=294, earliestPutTs=1734371804413 2024-12-16T17:56:46,485 DEBUG [RS:0;3609ad07831c:39733-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 4 files of size 49450 starting at candidate #0 after considering 3 permutations with 3 in ratio 2024-12-16T17:56:46,485 DEBUG [RS:0;3609ad07831c:39733-longCompactions-0 {}] regionserver.HStore(1540): 
99318ad6c4e7b8782230d738424ff705/B is initiating minor compaction (all files) 2024-12-16T17:56:46,485 INFO [RS:0;3609ad07831c:39733-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 99318ad6c4e7b8782230d738424ff705/B in TestAcidGuarantees,,1734371794518.99318ad6c4e7b8782230d738424ff705. 2024-12-16T17:56:46,485 INFO [RS:0;3609ad07831c:39733-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/99318ad6c4e7b8782230d738424ff705/B/311e93ab16554161bc98b6d49e813c60, hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/99318ad6c4e7b8782230d738424ff705/B/ecd1dc900f95461fa2e7f9e963aa4a40, hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/99318ad6c4e7b8782230d738424ff705/B/f6470d1101c54d378c62bdd31cf29f9f, hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/99318ad6c4e7b8782230d738424ff705/B/36e315ee0cbe4cf58e10817126dcd534] into tmpdir=hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/99318ad6c4e7b8782230d738424ff705/.tmp, totalSize=48.3 K 2024-12-16T17:56:46,486 DEBUG [RS:0;3609ad07831c:39733-longCompactions-0 {}] compactions.Compactor(224): Compacting 311e93ab16554161bc98b6d49e813c60, keycount=150, bloomtype=ROW, size=12.4 K, encoding=NONE, compression=NONE, seqNum=240, earliestPutTs=1734371802946 2024-12-16T17:56:46,487 DEBUG [RS:0;3609ad07831c:39733-longCompactions-0 {}] compactions.Compactor(224): Compacting ecd1dc900f95461fa2e7f9e963aa4a40, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=253, earliestPutTs=1734371803077 2024-12-16T17:56:46,488 DEBUG [RS:0;3609ad07831c:39733-longCompactions-0 {}] compactions.Compactor(224): Compacting f6470d1101c54d378c62bdd31cf29f9f, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=279, earliestPutTs=1734371803783 2024-12-16T17:56:46,488 DEBUG [RS:0;3609ad07831c:39733-longCompactions-0 {}] compactions.Compactor(224): Compacting 36e315ee0cbe4cf58e10817126dcd534, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=294, earliestPutTs=1734371804414 2024-12-16T17:56:46,516 INFO [RS:0;3609ad07831c:39733-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 99318ad6c4e7b8782230d738424ff705#A#compaction#60 average throughput is 2.18 MB/second, slept 0 time(s) and total slept time is 0 ms. 1 active operations remaining, total limit is 50.00 MB/second 2024-12-16T17:56:46,516 INFO [RS:0;3609ad07831c:39733-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 99318ad6c4e7b8782230d738424ff705#B#compaction#61 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-12-16T17:56:46,517 DEBUG [RS:0;3609ad07831c:39733-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/99318ad6c4e7b8782230d738424ff705/.tmp/A/1c434a0d317a4709a57b7e2a11f417de is 50, key is test_row_0/A:col10/1734371804414/Put/seqid=0 2024-12-16T17:56:46,517 DEBUG [RS:0;3609ad07831c:39733-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/99318ad6c4e7b8782230d738424ff705/.tmp/B/fc315482f6ec492599ec159804df2a6f is 50, key is test_row_0/B:col10/1734371804414/Put/seqid=0 2024-12-16T17:56:46,532 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41817 is added to blk_1073741899_1075 (size=12983) 2024-12-16T17:56:46,542 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41817 is added to blk_1073741900_1076 (size=12983) 2024-12-16T17:56:46,547 DEBUG [RS:0;3609ad07831c:39733-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/99318ad6c4e7b8782230d738424ff705/.tmp/B/fc315482f6ec492599ec159804df2a6f as hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/99318ad6c4e7b8782230d738424ff705/B/fc315482f6ec492599ec159804df2a6f 2024-12-16T17:56:46,552 DEBUG [RS:0;3609ad07831c:39733-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/99318ad6c4e7b8782230d738424ff705/.tmp/A/1c434a0d317a4709a57b7e2a11f417de as hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/99318ad6c4e7b8782230d738424ff705/A/1c434a0d317a4709a57b7e2a11f417de 2024-12-16T17:56:46,564 INFO [RS:0;3609ad07831c:39733-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 4 (all) file(s) in 99318ad6c4e7b8782230d738424ff705/A of 99318ad6c4e7b8782230d738424ff705 into 1c434a0d317a4709a57b7e2a11f417de(size=12.7 K), total size for store is 12.7 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-12-16T17:56:46,564 DEBUG [RS:0;3609ad07831c:39733-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 99318ad6c4e7b8782230d738424ff705: 2024-12-16T17:56:46,564 INFO [RS:0;3609ad07831c:39733-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1734371794518.99318ad6c4e7b8782230d738424ff705., storeName=99318ad6c4e7b8782230d738424ff705/A, priority=12, startTime=1734371806479; duration=0sec 2024-12-16T17:56:46,565 INFO [RS:0;3609ad07831c:39733-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 4 (all) file(s) in 99318ad6c4e7b8782230d738424ff705/B of 99318ad6c4e7b8782230d738424ff705 into fc315482f6ec492599ec159804df2a6f(size=12.7 K), total size for store is 12.7 K. This selection was in queue for 0sec, and took 0sec to execute. 
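The repeated RegionTooBusyException entries in this run come from HRegion.checkResources rejecting puts while the region's memstore sits above its blocking limit (512.0 K here). That limit is normally hbase.hregion.memstore.flush.size multiplied by hbase.hregion.memstore.block.multiplier (default 4), so 512 K would be consistent with a test-sized flush threshold of roughly 128 K; that figure is an assumption, not something stated in the log. The following is a minimal Java sketch, not part of the test itself, of those two settings plus a writer that backs off and retries when a put is rejected; the column value, retry count and backoff numbers are invented for illustration.

import java.io.IOException;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.util.Bytes;

public class BusyRegionWriterSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    // Blocking limit = flush size x block multiplier: 128 K x 4 = 512 K,
    // matching the "Over memstore limit=512.0 K" messages (assumed test values).
    conf.setLong("hbase.hregion.memstore.flush.size", 128 * 1024);
    conf.setInt("hbase.hregion.memstore.block.multiplier", 4);

    try (Connection conn = ConnectionFactory.createConnection(conf);
         Table table = conn.getTable(TableName.valueOf("TestAcidGuarantees"))) {
      Put put = new Put(Bytes.toBytes("test_row_0"));
      put.addColumn(Bytes.toBytes("A"), Bytes.toBytes("col10"), Bytes.toBytes("value"));
      long backoffMs = 50;
      for (int attempt = 1; attempt <= 10; attempt++) {
        try {
          table.put(put);   // the client also retries internally before giving up
          return;           // write accepted once a flush frees memstore space
        } catch (IOException busy) {
          // RegionTooBusyException (possibly wrapped by the client's retry machinery):
          // wait for MemStoreFlusher to catch up, then try again with a longer pause.
          Thread.sleep(backoffMs);
          backoffMs = Math.min(backoffMs * 2, 2000);
        }
      }
      throw new IllegalStateException("region stayed over its memstore limit");
    }
  }
}
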
2024-12-16T17:56:46,565 DEBUG [RS:0;3609ad07831c:39733-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-16T17:56:46,565 DEBUG [RS:0;3609ad07831c:39733-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 99318ad6c4e7b8782230d738424ff705: 2024-12-16T17:56:46,565 DEBUG [RS:0;3609ad07831c:39733-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 99318ad6c4e7b8782230d738424ff705:A 2024-12-16T17:56:46,565 INFO [RS:0;3609ad07831c:39733-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1734371794518.99318ad6c4e7b8782230d738424ff705., storeName=99318ad6c4e7b8782230d738424ff705/B, priority=12, startTime=1734371806481; duration=0sec 2024-12-16T17:56:46,565 DEBUG [RS:0;3609ad07831c:39733-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 4 store files, 0 compacting, 4 eligible, 16 blocking 2024-12-16T17:56:46,565 DEBUG [RS:0;3609ad07831c:39733-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-16T17:56:46,565 DEBUG [RS:0;3609ad07831c:39733-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 99318ad6c4e7b8782230d738424ff705:B 2024-12-16T17:56:46,567 DEBUG [RS:0;3609ad07831c:39733-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 4 files of size 49450 starting at candidate #0 after considering 3 permutations with 3 in ratio 2024-12-16T17:56:46,567 DEBUG [RS:0;3609ad07831c:39733-shortCompactions-0 {}] regionserver.HStore(1540): 99318ad6c4e7b8782230d738424ff705/C is initiating minor compaction (all files) 2024-12-16T17:56:46,567 INFO [RS:0;3609ad07831c:39733-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 99318ad6c4e7b8782230d738424ff705/C in TestAcidGuarantees,,1734371794518.99318ad6c4e7b8782230d738424ff705. 
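At this point the flush at sequenceid=294 has triggered minor compactions of all three stores: A, B and C each collapse four HFiles totalling about 48-51 K into a single ~12.7 K file. As a rough sketch of how the same flush-then-compact cycle can be driven and observed from a client, the snippet below uses the standard Admin calls; treating TestAcidGuarantees as the target table and the 500 ms polling interval are illustrative assumptions only.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.CompactionState;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

public class FlushAndCompactSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    TableName table = TableName.valueOf("TestAcidGuarantees");
    try (Connection conn = ConnectionFactory.createConnection(conf);
         Admin admin = conn.getAdmin()) {
      admin.flush(table);     // roughly what MemStoreFlusher does when the memstore fills
      admin.compact(table);   // request a minor compaction, like the CompactSplit marks above
      // Poll until the region server reports no compaction in progress for the table.
      while (admin.getCompactionState(table) != CompactionState.NONE) {
        Thread.sleep(500);
      }
    }
  }
}
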
2024-12-16T17:56:46,567 INFO [RS:0;3609ad07831c:39733-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/99318ad6c4e7b8782230d738424ff705/C/69c32887809b456abb6b2c7d7f3c4903, hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/99318ad6c4e7b8782230d738424ff705/C/4f67f22f2ef24477a403be93dc85fc8a, hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/99318ad6c4e7b8782230d738424ff705/C/6439325f4caf44969f9c3f2f78fdafb3, hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/99318ad6c4e7b8782230d738424ff705/C/c70d95ce77ea4ef0b0a494b1a57eca66] into tmpdir=hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/99318ad6c4e7b8782230d738424ff705/.tmp, totalSize=48.3 K 2024-12-16T17:56:46,568 DEBUG [RS:0;3609ad07831c:39733-shortCompactions-0 {}] compactions.Compactor(224): Compacting 69c32887809b456abb6b2c7d7f3c4903, keycount=150, bloomtype=ROW, size=12.4 K, encoding=NONE, compression=NONE, seqNum=240, earliestPutTs=1734371802946 2024-12-16T17:56:46,568 DEBUG [RS:0;3609ad07831c:39733-shortCompactions-0 {}] compactions.Compactor(224): Compacting 4f67f22f2ef24477a403be93dc85fc8a, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=253, earliestPutTs=1734371803077 2024-12-16T17:56:46,569 DEBUG [RS:0;3609ad07831c:39733-shortCompactions-0 {}] compactions.Compactor(224): Compacting 6439325f4caf44969f9c3f2f78fdafb3, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=279, earliestPutTs=1734371803783 2024-12-16T17:56:46,569 DEBUG [RS:0;3609ad07831c:39733-shortCompactions-0 {}] compactions.Compactor(224): Compacting c70d95ce77ea4ef0b0a494b1a57eca66, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=294, earliestPutTs=1734371804414 2024-12-16T17:56:46,579 INFO [RS:0;3609ad07831c:39733-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 99318ad6c4e7b8782230d738424ff705#C#compaction#62 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-12-16T17:56:46,580 DEBUG [RS:0;3609ad07831c:39733-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/99318ad6c4e7b8782230d738424ff705/.tmp/C/dbfa675837164fdc948c7e4290a75e53 is 50, key is test_row_0/C:col10/1734371804414/Put/seqid=0 2024-12-16T17:56:46,590 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41817 is added to blk_1073741901_1077 (size=12983) 2024-12-16T17:56:46,600 DEBUG [RS:0;3609ad07831c:39733-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/99318ad6c4e7b8782230d738424ff705/.tmp/C/dbfa675837164fdc948c7e4290a75e53 as hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/99318ad6c4e7b8782230d738424ff705/C/dbfa675837164fdc948c7e4290a75e53 2024-12-16T17:56:46,610 INFO [RS:0;3609ad07831c:39733-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 4 (all) file(s) in 99318ad6c4e7b8782230d738424ff705/C of 99318ad6c4e7b8782230d738424ff705 into dbfa675837164fdc948c7e4290a75e53(size=12.7 K), total size for store is 12.7 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-12-16T17:56:46,610 DEBUG [RS:0;3609ad07831c:39733-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 99318ad6c4e7b8782230d738424ff705: 2024-12-16T17:56:46,610 INFO [RS:0;3609ad07831c:39733-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1734371794518.99318ad6c4e7b8782230d738424ff705., storeName=99318ad6c4e7b8782230d738424ff705/C, priority=12, startTime=1734371806482; duration=0sec 2024-12-16T17:56:46,610 DEBUG [RS:0;3609ad07831c:39733-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-16T17:56:46,611 DEBUG [RS:0;3609ad07831c:39733-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 99318ad6c4e7b8782230d738424ff705:C 2024-12-16T17:56:46,686 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 99318ad6c4e7b8782230d738424ff705 3/3 column families, dataSize=134.18 KB heapSize=352.31 KB 2024-12-16T17:56:46,686 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39733 {}] regionserver.HRegion(8581): Flush requested on 99318ad6c4e7b8782230d738424ff705 2024-12-16T17:56:46,687 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 99318ad6c4e7b8782230d738424ff705, store=A 2024-12-16T17:56:46,687 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-16T17:56:46,688 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 99318ad6c4e7b8782230d738424ff705, store=B 2024-12-16T17:56:46,688 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-16T17:56:46,688 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 99318ad6c4e7b8782230d738424ff705, store=C 2024-12-16T17:56:46,688 DEBUG [MemStoreFlusher.0 {}] 
regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-16T17:56:46,694 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/99318ad6c4e7b8782230d738424ff705/.tmp/A/b41bb71d1fe5424ca7b4d887255411ea is 50, key is test_row_0/A:col10/1734371806686/Put/seqid=0 2024-12-16T17:56:46,704 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39733 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=99318ad6c4e7b8782230d738424ff705, server=3609ad07831c,39733,1734371789085 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-16T17:56:46,704 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39733 {}] ipc.CallRunner(138): callId: 139 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40284 deadline: 1734371866699, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=99318ad6c4e7b8782230d738424ff705, server=3609ad07831c,39733,1734371789085 2024-12-16T17:56:46,704 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39733 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=99318ad6c4e7b8782230d738424ff705, server=3609ad07831c,39733,1734371789085 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-16T17:56:46,705 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39733 {}] ipc.CallRunner(138): callId: 134 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40322 deadline: 1734371866700, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=99318ad6c4e7b8782230d738424ff705, server=3609ad07831c,39733,1734371789085 2024-12-16T17:56:46,705 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39733 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=99318ad6c4e7b8782230d738424ff705, server=3609ad07831c,39733,1734371789085 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-16T17:56:46,705 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39733 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=99318ad6c4e7b8782230d738424ff705, server=3609ad07831c,39733,1734371789085 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-16T17:56:46,705 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39733 {}] ipc.CallRunner(138): callId: 136 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40296 deadline: 1734371866702, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=99318ad6c4e7b8782230d738424ff705, server=3609ad07831c,39733,1734371789085 2024-12-16T17:56:46,705 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39733 {}] ipc.CallRunner(138): callId: 142 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40298 deadline: 1734371866704, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=99318ad6c4e7b8782230d738424ff705, server=3609ad07831c,39733,1734371789085 2024-12-16T17:56:46,706 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39733 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=99318ad6c4e7b8782230d738424ff705, server=3609ad07831c,39733,1734371789085 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-16T17:56:46,706 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39733 {}] ipc.CallRunner(138): callId: 138 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40334 deadline: 1734371866704, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=99318ad6c4e7b8782230d738424ff705, server=3609ad07831c,39733,1734371789085 2024-12-16T17:56:46,719 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41817 is added to blk_1073741902_1078 (size=14741) 2024-12-16T17:56:46,724 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=46.96 KB at sequenceid=321 (bloomFilter=true), to=hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/99318ad6c4e7b8782230d738424ff705/.tmp/A/b41bb71d1fe5424ca7b4d887255411ea 2024-12-16T17:56:46,738 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/99318ad6c4e7b8782230d738424ff705/.tmp/B/05f444bf98ee420fa04c7adf211ea2e0 is 50, key is test_row_0/B:col10/1734371806686/Put/seqid=0 2024-12-16T17:56:46,754 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41817 is added to blk_1073741903_1079 (size=12301) 2024-12-16T17:56:46,757 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=46.96 KB at sequenceid=321 (bloomFilter=true), to=hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/99318ad6c4e7b8782230d738424ff705/.tmp/B/05f444bf98ee420fa04c7adf211ea2e0 2024-12-16T17:56:46,774 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/99318ad6c4e7b8782230d738424ff705/.tmp/C/7c69e1d4406649aabdb17364f9b830f7 is 50, key is test_row_0/C:col10/1734371806686/Put/seqid=0 2024-12-16T17:56:46,791 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41817 is added to blk_1073741904_1080 (size=12301) 2024-12-16T17:56:46,792 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=46.96 KB at sequenceid=321 (bloomFilter=true), to=hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/99318ad6c4e7b8782230d738424ff705/.tmp/C/7c69e1d4406649aabdb17364f9b830f7 2024-12-16T17:56:46,802 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing 
hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/99318ad6c4e7b8782230d738424ff705/.tmp/A/b41bb71d1fe5424ca7b4d887255411ea as hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/99318ad6c4e7b8782230d738424ff705/A/b41bb71d1fe5424ca7b4d887255411ea 2024-12-16T17:56:46,808 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39733 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=99318ad6c4e7b8782230d738424ff705, server=3609ad07831c,39733,1734371789085 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-16T17:56:46,808 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39733 {}] ipc.CallRunner(138): callId: 141 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40284 deadline: 1734371866806, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=99318ad6c4e7b8782230d738424ff705, server=3609ad07831c,39733,1734371789085 2024-12-16T17:56:46,809 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39733 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=99318ad6c4e7b8782230d738424ff705, server=3609ad07831c,39733,1734371789085 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-16T17:56:46,809 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39733 {}] ipc.CallRunner(138): callId: 138 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40296 deadline: 1734371866807, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=99318ad6c4e7b8782230d738424ff705, server=3609ad07831c,39733,1734371789085 2024-12-16T17:56:46,810 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39733 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=99318ad6c4e7b8782230d738424ff705, server=3609ad07831c,39733,1734371789085 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-16T17:56:46,810 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39733 {}] ipc.CallRunner(138): callId: 144 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40298 deadline: 1734371866806, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=99318ad6c4e7b8782230d738424ff705, server=3609ad07831c,39733,1734371789085 2024-12-16T17:56:46,810 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39733 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=99318ad6c4e7b8782230d738424ff705, server=3609ad07831c,39733,1734371789085 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-16T17:56:46,810 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39733 {}] ipc.CallRunner(138): callId: 140 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40334 deadline: 1734371866807, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=99318ad6c4e7b8782230d738424ff705, server=3609ad07831c,39733,1734371789085 2024-12-16T17:56:46,810 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39733 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=99318ad6c4e7b8782230d738424ff705, server=3609ad07831c,39733,1734371789085 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-16T17:56:46,811 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39733 {}] ipc.CallRunner(138): callId: 136 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40322 deadline: 1734371866807, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=99318ad6c4e7b8782230d738424ff705, server=3609ad07831c,39733,1734371789085 2024-12-16T17:56:46,811 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/99318ad6c4e7b8782230d738424ff705/A/b41bb71d1fe5424ca7b4d887255411ea, entries=200, sequenceid=321, filesize=14.4 K 2024-12-16T17:56:46,813 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/99318ad6c4e7b8782230d738424ff705/.tmp/B/05f444bf98ee420fa04c7adf211ea2e0 as hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/99318ad6c4e7b8782230d738424ff705/B/05f444bf98ee420fa04c7adf211ea2e0 2024-12-16T17:56:46,821 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/99318ad6c4e7b8782230d738424ff705/B/05f444bf98ee420fa04c7adf211ea2e0, entries=150, sequenceid=321, filesize=12.0 K 2024-12-16T17:56:46,824 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/99318ad6c4e7b8782230d738424ff705/.tmp/C/7c69e1d4406649aabdb17364f9b830f7 as hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/99318ad6c4e7b8782230d738424ff705/C/7c69e1d4406649aabdb17364f9b830f7 2024-12-16T17:56:46,833 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/99318ad6c4e7b8782230d738424ff705/C/7c69e1d4406649aabdb17364f9b830f7, entries=150, sequenceid=321, filesize=12.0 K 2024-12-16T17:56:46,835 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~140.89 KB/144270, heapSize ~369.84 KB/378720, currentSize=67.09 KB/68700 for 99318ad6c4e7b8782230d738424ff705 in 149ms, sequenceid=321, compaction requested=false 2024-12-16T17:56:46,835 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 99318ad6c4e7b8782230d738424ff705: 2024-12-16T17:56:47,012 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39733 {}] regionserver.HRegion(8581): Flush requested on 99318ad6c4e7b8782230d738424ff705 2024-12-16T17:56:47,012 INFO [MemStoreFlusher.0 {}] 
regionserver.HRegion(2837): Flushing 99318ad6c4e7b8782230d738424ff705 3/3 column families, dataSize=73.80 KB heapSize=194.11 KB 2024-12-16T17:56:47,013 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 99318ad6c4e7b8782230d738424ff705, store=A 2024-12-16T17:56:47,013 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-16T17:56:47,013 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 99318ad6c4e7b8782230d738424ff705, store=B 2024-12-16T17:56:47,013 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-16T17:56:47,013 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 99318ad6c4e7b8782230d738424ff705, store=C 2024-12-16T17:56:47,014 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-16T17:56:47,019 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/99318ad6c4e7b8782230d738424ff705/.tmp/A/df27eec3e2204212b9dba0d5128a7135 is 50, key is test_row_0/A:col10/1734371807010/Put/seqid=0 2024-12-16T17:56:47,039 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39733 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=99318ad6c4e7b8782230d738424ff705, server=3609ad07831c,39733,1734371789085 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-16T17:56:47,039 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39733 {}] ipc.CallRunner(138): callId: 144 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40296 deadline: 1734371867033, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=99318ad6c4e7b8782230d738424ff705, server=3609ad07831c,39733,1734371789085 2024-12-16T17:56:47,039 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39733 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=99318ad6c4e7b8782230d738424ff705, server=3609ad07831c,39733,1734371789085 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-16T17:56:47,039 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39733 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=99318ad6c4e7b8782230d738424ff705, server=3609ad07831c,39733,1734371789085 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-16T17:56:47,040 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39733 {}] ipc.CallRunner(138): callId: 147 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40284 deadline: 1734371867033, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=99318ad6c4e7b8782230d738424ff705, server=3609ad07831c,39733,1734371789085 2024-12-16T17:56:47,039 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39733 {}] ipc.CallRunner(138): callId: 146 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40334 deadline: 1734371867033, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=99318ad6c4e7b8782230d738424ff705, server=3609ad07831c,39733,1734371789085 2024-12-16T17:56:47,040 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39733 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=99318ad6c4e7b8782230d738424ff705, server=3609ad07831c,39733,1734371789085 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-16T17:56:47,040 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39733 {}] ipc.CallRunner(138): callId: 150 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40298 deadline: 1734371867034, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=99318ad6c4e7b8782230d738424ff705, server=3609ad07831c,39733,1734371789085 2024-12-16T17:56:47,040 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39733 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=99318ad6c4e7b8782230d738424ff705, server=3609ad07831c,39733,1734371789085 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-16T17:56:47,040 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39733 {}] ipc.CallRunner(138): callId: 142 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40322 deadline: 1734371867039, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=99318ad6c4e7b8782230d738424ff705, server=3609ad07831c,39733,1734371789085 2024-12-16T17:56:47,047 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41817 is added to blk_1073741905_1081 (size=17181) 2024-12-16T17:56:47,048 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=26.84 KB at sequenceid=336 (bloomFilter=true), to=hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/99318ad6c4e7b8782230d738424ff705/.tmp/A/df27eec3e2204212b9dba0d5128a7135 2024-12-16T17:56:47,061 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/99318ad6c4e7b8782230d738424ff705/.tmp/B/5e1f7ad5baab4824bfb9337b1e32dcce is 50, key is test_row_0/B:col10/1734371807010/Put/seqid=0 2024-12-16T17:56:47,079 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41817 is added to blk_1073741906_1082 (size=12301) 2024-12-16T17:56:47,080 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=26.84 KB at sequenceid=336 (bloomFilter=true), to=hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/99318ad6c4e7b8782230d738424ff705/.tmp/B/5e1f7ad5baab4824bfb9337b1e32dcce 2024-12-16T17:56:47,092 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/99318ad6c4e7b8782230d738424ff705/.tmp/C/9776a319c22f48eeb867f82c243066bb is 50, key is test_row_0/C:col10/1734371807010/Put/seqid=0 2024-12-16T17:56:47,105 INFO [Block report processor {}] 
blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41817 is added to blk_1073741907_1083 (size=12301) 2024-12-16T17:56:47,143 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39733 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=99318ad6c4e7b8782230d738424ff705, server=3609ad07831c,39733,1734371789085 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-16T17:56:47,143 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39733 {}] ipc.CallRunner(138): callId: 146 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40296 deadline: 1734371867141, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=99318ad6c4e7b8782230d738424ff705, server=3609ad07831c,39733,1734371789085 2024-12-16T17:56:47,143 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39733 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=99318ad6c4e7b8782230d738424ff705, server=3609ad07831c,39733,1734371789085 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-16T17:56:47,144 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39733 {}] ipc.CallRunner(138): callId: 149 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40284 deadline: 1734371867141, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=99318ad6c4e7b8782230d738424ff705, server=3609ad07831c,39733,1734371789085 2024-12-16T17:56:47,144 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39733 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=99318ad6c4e7b8782230d738424ff705, server=3609ad07831c,39733,1734371789085 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-16T17:56:47,144 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39733 {}] ipc.CallRunner(138): callId: 148 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40334 deadline: 1734371867141, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=99318ad6c4e7b8782230d738424ff705, server=3609ad07831c,39733,1734371789085 2024-12-16T17:56:47,144 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39733 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=99318ad6c4e7b8782230d738424ff705, server=3609ad07831c,39733,1734371789085 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-16T17:56:47,144 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39733 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=99318ad6c4e7b8782230d738424ff705, server=3609ad07831c,39733,1734371789085 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-16T17:56:47,144 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39733 {}] ipc.CallRunner(138): callId: 144 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40322 deadline: 1734371867141, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=99318ad6c4e7b8782230d738424ff705, server=3609ad07831c,39733,1734371789085 2024-12-16T17:56:47,144 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39733 {}] ipc.CallRunner(138): callId: 152 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40298 deadline: 1734371867141, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=99318ad6c4e7b8782230d738424ff705, server=3609ad07831c,39733,1734371789085 2024-12-16T17:56:47,205 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38367 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=16 2024-12-16T17:56:47,205 INFO [Thread-159 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 16 completed 2024-12-16T17:56:47,206 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38367 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-12-16T17:56:47,207 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38367 {}] procedure2.ProcedureExecutor(1098): Stored pid=18, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=18, table=TestAcidGuarantees 2024-12-16T17:56:47,208 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38367 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=18 2024-12-16T17:56:47,208 INFO [PEWorker-1 {}] procedure.FlushTableProcedure(91): pid=18, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=18, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-12-16T17:56:47,209 INFO [PEWorker-1 {}] procedure.FlushTableProcedure(91): pid=18, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=18, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-12-16T17:56:47,209 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=19, ppid=18, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-12-16T17:56:47,309 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38367 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=18 2024-12-16T17:56:47,347 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39733 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=99318ad6c4e7b8782230d738424ff705, server=3609ad07831c,39733,1734371789085 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-16T17:56:47,347 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39733 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=99318ad6c4e7b8782230d738424ff705, server=3609ad07831c,39733,1734371789085 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-16T17:56:47,347 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39733 {}] ipc.CallRunner(138): callId: 148 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40296 deadline: 1734371867346, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=99318ad6c4e7b8782230d738424ff705, server=3609ad07831c,39733,1734371789085 2024-12-16T17:56:47,347 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39733 {}] ipc.CallRunner(138): callId: 150 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40334 deadline: 1734371867347, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=99318ad6c4e7b8782230d738424ff705, server=3609ad07831c,39733,1734371789085 2024-12-16T17:56:47,348 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39733 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=99318ad6c4e7b8782230d738424ff705, server=3609ad07831c,39733,1734371789085 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-16T17:56:47,348 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39733 {}] ipc.CallRunner(138): callId: 154 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40298 deadline: 1734371867347, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=99318ad6c4e7b8782230d738424ff705, server=3609ad07831c,39733,1734371789085 2024-12-16T17:56:47,348 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39733 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=99318ad6c4e7b8782230d738424ff705, server=3609ad07831c,39733,1734371789085 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-16T17:56:47,348 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39733 {}] ipc.CallRunner(138): callId: 151 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40284 deadline: 1734371867347, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=99318ad6c4e7b8782230d738424ff705, server=3609ad07831c,39733,1734371789085 2024-12-16T17:56:47,349 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39733 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=99318ad6c4e7b8782230d738424ff705, server=3609ad07831c,39733,1734371789085 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-16T17:56:47,349 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39733 {}] ipc.CallRunner(138): callId: 146 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40322 deadline: 1734371867347, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=99318ad6c4e7b8782230d738424ff705, server=3609ad07831c,39733,1734371789085 2024-12-16T17:56:47,361 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 3609ad07831c,39733,1734371789085 2024-12-16T17:56:47,361 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=39733 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=19 2024-12-16T17:56:47,362 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-1 {event_type=RS_FLUSH_REGIONS, pid=19}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1734371794518.99318ad6c4e7b8782230d738424ff705. 2024-12-16T17:56:47,362 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-1 {event_type=RS_FLUSH_REGIONS, pid=19}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1734371794518.99318ad6c4e7b8782230d738424ff705. as already flushing 2024-12-16T17:56:47,362 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-1 {event_type=RS_FLUSH_REGIONS, pid=19}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1734371794518.99318ad6c4e7b8782230d738424ff705. 2024-12-16T17:56:47,362 ERROR [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-1 {event_type=RS_FLUSH_REGIONS, pid=19}] handler.RSProcedureHandler(58): pid=19 java.io.IOException: Unable to complete flush {ENCODED => 99318ad6c4e7b8782230d738424ff705, NAME => 'TestAcidGuarantees,,1734371794518.99318ad6c4e7b8782230d738424ff705.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
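The put handlers above keep rejecting mutations with RegionTooBusyException while the memstore flush drains. On the client side this surfaces through the ordinary Table.put() path; the stock HBase client already treats the exception as retriable and backs off internally, but a minimal explicit sketch of that pattern looks like the following (the table name and column layout mirror the test rows seen in the log, while the retry bounds are purely illustrative):

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.RegionTooBusyException;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.util.Bytes;

public class BusyRegionRetryExample {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    try (Connection conn = ConnectionFactory.createConnection(conf);
         Table table = conn.getTable(TableName.valueOf("TestAcidGuarantees"))) {
      Put put = new Put(Bytes.toBytes("test_row_0"));
      put.addColumn(Bytes.toBytes("A"), Bytes.toBytes("col10"), Bytes.toBytes("value"));
      long backoffMs = 100;                // illustrative starting backoff
      for (int attempt = 1; attempt <= 5; attempt++) {
        try {
          table.put(put);                  // the same Mutate call the handlers above reject
          break;                           // accepted once the flush shrinks the memstore
        } catch (RegionTooBusyException busy) {
          // Region is over its blocking memstore size; wait for the flush, then retry.
          Thread.sleep(backoffMs);
          backoffMs *= 2;
        }
      }
    }
  }
}

Exponential backoff keeps the RPC handlers free instead of hammering a region that is already flushing.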
2024-12-16T17:56:47,362 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-1 {event_type=RS_FLUSH_REGIONS, pid=19}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=19 java.io.IOException: Unable to complete flush {ENCODED => 99318ad6c4e7b8782230d738424ff705, NAME => 'TestAcidGuarantees,,1734371794518.99318ad6c4e7b8782230d738424ff705.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-16T17:56:47,363 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38367 {}] master.HMaster(4114): Remote procedure failed, pid=19 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 99318ad6c4e7b8782230d738424ff705, NAME => 'TestAcidGuarantees,,1734371794518.99318ad6c4e7b8782230d738424ff705.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 99318ad6c4e7b8782230d738424ff705, NAME => 'TestAcidGuarantees,,1734371794518.99318ad6c4e7b8782230d738424ff705.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-16T17:56:47,506 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=26.84 KB at sequenceid=336 (bloomFilter=true), to=hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/99318ad6c4e7b8782230d738424ff705/.tmp/C/9776a319c22f48eeb867f82c243066bb 2024-12-16T17:56:47,509 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38367 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=18 2024-12-16T17:56:47,515 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 3609ad07831c,39733,1734371789085 2024-12-16T17:56:47,516 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=39733 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=19 2024-12-16T17:56:47,516 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-2 {event_type=RS_FLUSH_REGIONS, pid=19}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1734371794518.99318ad6c4e7b8782230d738424ff705. 2024-12-16T17:56:47,516 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-2 {event_type=RS_FLUSH_REGIONS, pid=19}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1734371794518.99318ad6c4e7b8782230d738424ff705. as already flushing 2024-12-16T17:56:47,516 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-2 {event_type=RS_FLUSH_REGIONS, pid=19}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1734371794518.99318ad6c4e7b8782230d738424ff705. 2024-12-16T17:56:47,516 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/99318ad6c4e7b8782230d738424ff705/.tmp/A/df27eec3e2204212b9dba0d5128a7135 as hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/99318ad6c4e7b8782230d738424ff705/A/df27eec3e2204212b9dba0d5128a7135 2024-12-16T17:56:47,516 ERROR [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-2 {event_type=RS_FLUSH_REGIONS, pid=19}] handler.RSProcedureHandler(58): pid=19 java.io.IOException: Unable to complete flush {ENCODED => 99318ad6c4e7b8782230d738424ff705, NAME => 'TestAcidGuarantees,,1734371794518.99318ad6c4e7b8782230d738424ff705.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
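The FlushTableProcedure (pid=18) and its FlushRegionCallable subtask (pid=19) above were started because a client asked for a table flush ("Client=jenkins//172.17.0.2 flush TestAcidGuarantees"). On the client side that request is simply an Admin.flush() call; a minimal sketch, assuming a connection to the same cluster:

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

public class FlushTableExample {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    try (Connection conn = ConnectionFactory.createConnection(conf);
         Admin admin = conn.getAdmin()) {
      // Submits a flush request for the table and waits for the master-side
      // procedure to finish, matching the "Operation: FLUSH, Table Name:
      // default:TestAcidGuarantees" entries in the log above.
      admin.flush(TableName.valueOf("TestAcidGuarantees"));
    }
  }
}

The "NOT flushing ... as already flushing" and "Unable to complete flush" entries are the region server reporting that a flush is already in progress, which is why pid=19 is dispatched to the region server again a moment later.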
2024-12-16T17:56:47,516 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-2 {event_type=RS_FLUSH_REGIONS, pid=19}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=19 java.io.IOException: Unable to complete flush {ENCODED => 99318ad6c4e7b8782230d738424ff705, NAME => 'TestAcidGuarantees,,1734371794518.99318ad6c4e7b8782230d738424ff705.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-16T17:56:47,517 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38367 {}] master.HMaster(4114): Remote procedure failed, pid=19 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 99318ad6c4e7b8782230d738424ff705, NAME => 'TestAcidGuarantees,,1734371794518.99318ad6c4e7b8782230d738424ff705.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 99318ad6c4e7b8782230d738424ff705, NAME => 'TestAcidGuarantees,,1734371794518.99318ad6c4e7b8782230d738424ff705.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-16T17:56:47,524 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/99318ad6c4e7b8782230d738424ff705/A/df27eec3e2204212b9dba0d5128a7135, entries=250, sequenceid=336, filesize=16.8 K 2024-12-16T17:56:47,526 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/99318ad6c4e7b8782230d738424ff705/.tmp/B/5e1f7ad5baab4824bfb9337b1e32dcce as hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/99318ad6c4e7b8782230d738424ff705/B/5e1f7ad5baab4824bfb9337b1e32dcce 2024-12-16T17:56:47,535 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/99318ad6c4e7b8782230d738424ff705/B/5e1f7ad5baab4824bfb9337b1e32dcce, entries=150, sequenceid=336, filesize=12.0 K 2024-12-16T17:56:47,537 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/99318ad6c4e7b8782230d738424ff705/.tmp/C/9776a319c22f48eeb867f82c243066bb as hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/99318ad6c4e7b8782230d738424ff705/C/9776a319c22f48eeb867f82c243066bb 2024-12-16T17:56:47,545 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/99318ad6c4e7b8782230d738424ff705/C/9776a319c22f48eeb867f82c243066bb, entries=150, sequenceid=336, filesize=12.0 K 2024-12-16T17:56:47,547 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~80.51 KB/82440, heapSize ~211.64 KB/216720, currentSize=120.76 KB/123660 for 99318ad6c4e7b8782230d738424ff705 in 535ms, sequenceid=336, compaction requested=true 2024-12-16T17:56:47,547 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 99318ad6c4e7b8782230d738424ff705: 2024-12-16T17:56:47,547 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 99318ad6c4e7b8782230d738424ff705:A, priority=-2147483648, current under compaction store size is 1 2024-12-16T17:56:47,547 DEBUG [RS:0;3609ad07831c:39733-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-16T17:56:47,547 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-16T17:56:47,547 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 99318ad6c4e7b8782230d738424ff705:B, priority=-2147483648, current under compaction store size is 2 2024-12-16T17:56:47,548 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: 
MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-16T17:56:47,548 DEBUG [RS:0;3609ad07831c:39733-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-16T17:56:47,548 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 99318ad6c4e7b8782230d738424ff705:C, priority=-2147483648, current under compaction store size is 3 2024-12-16T17:56:47,548 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-16T17:56:47,549 DEBUG [RS:0;3609ad07831c:39733-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 44905 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-16T17:56:47,550 DEBUG [RS:0;3609ad07831c:39733-shortCompactions-0 {}] regionserver.HStore(1540): 99318ad6c4e7b8782230d738424ff705/A is initiating minor compaction (all files) 2024-12-16T17:56:47,550 INFO [RS:0;3609ad07831c:39733-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 99318ad6c4e7b8782230d738424ff705/A in TestAcidGuarantees,,1734371794518.99318ad6c4e7b8782230d738424ff705. 2024-12-16T17:56:47,550 INFO [RS:0;3609ad07831c:39733-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/99318ad6c4e7b8782230d738424ff705/A/1c434a0d317a4709a57b7e2a11f417de, hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/99318ad6c4e7b8782230d738424ff705/A/b41bb71d1fe5424ca7b4d887255411ea, hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/99318ad6c4e7b8782230d738424ff705/A/df27eec3e2204212b9dba0d5128a7135] into tmpdir=hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/99318ad6c4e7b8782230d738424ff705/.tmp, totalSize=43.9 K 2024-12-16T17:56:47,550 DEBUG [RS:0;3609ad07831c:39733-shortCompactions-0 {}] compactions.Compactor(224): Compacting 1c434a0d317a4709a57b7e2a11f417de, keycount=150, bloomtype=ROW, size=12.7 K, encoding=NONE, compression=NONE, seqNum=294, earliestPutTs=1734371804414 2024-12-16T17:56:47,551 DEBUG [RS:0;3609ad07831c:39733-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 37585 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-16T17:56:47,551 DEBUG [RS:0;3609ad07831c:39733-longCompactions-0 {}] regionserver.HStore(1540): 99318ad6c4e7b8782230d738424ff705/B is initiating minor compaction (all files) 2024-12-16T17:56:47,551 INFO [RS:0;3609ad07831c:39733-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 99318ad6c4e7b8782230d738424ff705/B in TestAcidGuarantees,,1734371794518.99318ad6c4e7b8782230d738424ff705. 
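The recurring "Over memstore limit=512.0 K" figure and the three-file compaction selections above both trace back to region-server tuning knobs: the blocking memstore size is the per-region flush size multiplied by hbase.hregion.memstore.block.multiplier, and minor compaction becomes eligible once a store reaches the minimum file count. A hedged configuration sketch that would reproduce these numbers follows; the concrete values are assumptions chosen to match the log, not read from the test source:

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;

public class RegionPressureSettings {
  public static void main(String[] args) {
    Configuration conf = HBaseConfiguration.create();
    // Blocking limit = flush size * block multiplier. 128 KB * 4 = 512 KB,
    // which matches the limit reported in the RegionTooBusyException above
    // (assumed values; the actual test may configure these differently).
    conf.setLong("hbase.hregion.memstore.flush.size", 128 * 1024);
    conf.setLong("hbase.hregion.memstore.block.multiplier", 4);
    // Minor compaction is considered once a store has at least this many
    // eligible HFiles, which is why the policy above selects batches of 3 files.
    conf.setInt("hbase.hstore.compaction.min", 3);
    System.out.println("blocking memstore size = "
        + conf.getLong("hbase.hregion.memstore.flush.size", 0)
          * conf.getLong("hbase.hregion.memstore.block.multiplier", 0) + " bytes");
  }
}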
2024-12-16T17:56:47,551 INFO [RS:0;3609ad07831c:39733-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/99318ad6c4e7b8782230d738424ff705/B/fc315482f6ec492599ec159804df2a6f, hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/99318ad6c4e7b8782230d738424ff705/B/05f444bf98ee420fa04c7adf211ea2e0, hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/99318ad6c4e7b8782230d738424ff705/B/5e1f7ad5baab4824bfb9337b1e32dcce] into tmpdir=hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/99318ad6c4e7b8782230d738424ff705/.tmp, totalSize=36.7 K 2024-12-16T17:56:47,551 DEBUG [RS:0;3609ad07831c:39733-shortCompactions-0 {}] compactions.Compactor(224): Compacting b41bb71d1fe5424ca7b4d887255411ea, keycount=200, bloomtype=ROW, size=14.4 K, encoding=NONE, compression=NONE, seqNum=321, earliestPutTs=1734371805559 2024-12-16T17:56:47,551 DEBUG [RS:0;3609ad07831c:39733-longCompactions-0 {}] compactions.Compactor(224): Compacting fc315482f6ec492599ec159804df2a6f, keycount=150, bloomtype=ROW, size=12.7 K, encoding=NONE, compression=NONE, seqNum=294, earliestPutTs=1734371804414 2024-12-16T17:56:47,552 DEBUG [RS:0;3609ad07831c:39733-shortCompactions-0 {}] compactions.Compactor(224): Compacting df27eec3e2204212b9dba0d5128a7135, keycount=250, bloomtype=ROW, size=16.8 K, encoding=NONE, compression=NONE, seqNum=336, earliestPutTs=1734371806694 2024-12-16T17:56:47,552 DEBUG [RS:0;3609ad07831c:39733-longCompactions-0 {}] compactions.Compactor(224): Compacting 05f444bf98ee420fa04c7adf211ea2e0, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=321, earliestPutTs=1734371805559 2024-12-16T17:56:47,553 DEBUG [RS:0;3609ad07831c:39733-longCompactions-0 {}] compactions.Compactor(224): Compacting 5e1f7ad5baab4824bfb9337b1e32dcce, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=336, earliestPutTs=1734371806694 2024-12-16T17:56:47,566 INFO [RS:0;3609ad07831c:39733-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 99318ad6c4e7b8782230d738424ff705#A#compaction#69 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-12-16T17:56:47,567 DEBUG [RS:0;3609ad07831c:39733-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/99318ad6c4e7b8782230d738424ff705/.tmp/A/4c85751af8444d56b854664297602353 is 50, key is test_row_0/A:col10/1734371807010/Put/seqid=0 2024-12-16T17:56:47,574 INFO [RS:0;3609ad07831c:39733-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 99318ad6c4e7b8782230d738424ff705#B#compaction#70 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-12-16T17:56:47,575 DEBUG [RS:0;3609ad07831c:39733-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/99318ad6c4e7b8782230d738424ff705/.tmp/B/7886f7480dc14798b6635992b16822dc is 50, key is test_row_0/B:col10/1734371807010/Put/seqid=0 2024-12-16T17:56:47,599 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41817 is added to blk_1073741908_1084 (size=13085) 2024-12-16T17:56:47,607 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41817 is added to blk_1073741909_1085 (size=13085) 2024-12-16T17:56:47,609 DEBUG [RS:0;3609ad07831c:39733-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/99318ad6c4e7b8782230d738424ff705/.tmp/A/4c85751af8444d56b854664297602353 as hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/99318ad6c4e7b8782230d738424ff705/A/4c85751af8444d56b854664297602353 2024-12-16T17:56:47,619 DEBUG [RS:0;3609ad07831c:39733-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/99318ad6c4e7b8782230d738424ff705/.tmp/B/7886f7480dc14798b6635992b16822dc as hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/99318ad6c4e7b8782230d738424ff705/B/7886f7480dc14798b6635992b16822dc 2024-12-16T17:56:47,626 INFO [RS:0;3609ad07831c:39733-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 99318ad6c4e7b8782230d738424ff705/A of 99318ad6c4e7b8782230d738424ff705 into 4c85751af8444d56b854664297602353(size=12.8 K), total size for store is 12.8 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-12-16T17:56:47,626 DEBUG [RS:0;3609ad07831c:39733-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 99318ad6c4e7b8782230d738424ff705: 2024-12-16T17:56:47,627 INFO [RS:0;3609ad07831c:39733-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1734371794518.99318ad6c4e7b8782230d738424ff705., storeName=99318ad6c4e7b8782230d738424ff705/A, priority=13, startTime=1734371807547; duration=0sec 2024-12-16T17:56:47,627 DEBUG [RS:0;3609ad07831c:39733-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-16T17:56:47,627 DEBUG [RS:0;3609ad07831c:39733-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 99318ad6c4e7b8782230d738424ff705:A 2024-12-16T17:56:47,627 DEBUG [RS:0;3609ad07831c:39733-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-16T17:56:47,629 INFO [RS:0;3609ad07831c:39733-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 99318ad6c4e7b8782230d738424ff705/B of 99318ad6c4e7b8782230d738424ff705 into 7886f7480dc14798b6635992b16822dc(size=12.8 K), total size for store is 12.8 K. 
This selection was in queue for 0sec, and took 0sec to execute. 2024-12-16T17:56:47,629 DEBUG [RS:0;3609ad07831c:39733-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 37585 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-16T17:56:47,629 DEBUG [RS:0;3609ad07831c:39733-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 99318ad6c4e7b8782230d738424ff705: 2024-12-16T17:56:47,629 INFO [RS:0;3609ad07831c:39733-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1734371794518.99318ad6c4e7b8782230d738424ff705., storeName=99318ad6c4e7b8782230d738424ff705/B, priority=13, startTime=1734371807547; duration=0sec 2024-12-16T17:56:47,629 DEBUG [RS:0;3609ad07831c:39733-shortCompactions-0 {}] regionserver.HStore(1540): 99318ad6c4e7b8782230d738424ff705/C is initiating minor compaction (all files) 2024-12-16T17:56:47,629 INFO [RS:0;3609ad07831c:39733-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 99318ad6c4e7b8782230d738424ff705/C in TestAcidGuarantees,,1734371794518.99318ad6c4e7b8782230d738424ff705. 2024-12-16T17:56:47,629 DEBUG [RS:0;3609ad07831c:39733-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-16T17:56:47,629 DEBUG [RS:0;3609ad07831c:39733-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 99318ad6c4e7b8782230d738424ff705:B 2024-12-16T17:56:47,629 INFO [RS:0;3609ad07831c:39733-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/99318ad6c4e7b8782230d738424ff705/C/dbfa675837164fdc948c7e4290a75e53, hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/99318ad6c4e7b8782230d738424ff705/C/7c69e1d4406649aabdb17364f9b830f7, hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/99318ad6c4e7b8782230d738424ff705/C/9776a319c22f48eeb867f82c243066bb] into tmpdir=hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/99318ad6c4e7b8782230d738424ff705/.tmp, totalSize=36.7 K 2024-12-16T17:56:47,630 DEBUG [RS:0;3609ad07831c:39733-shortCompactions-0 {}] compactions.Compactor(224): Compacting dbfa675837164fdc948c7e4290a75e53, keycount=150, bloomtype=ROW, size=12.7 K, encoding=NONE, compression=NONE, seqNum=294, earliestPutTs=1734371804414 2024-12-16T17:56:47,631 DEBUG [RS:0;3609ad07831c:39733-shortCompactions-0 {}] compactions.Compactor(224): Compacting 7c69e1d4406649aabdb17364f9b830f7, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=321, earliestPutTs=1734371805559 2024-12-16T17:56:47,632 DEBUG [RS:0;3609ad07831c:39733-shortCompactions-0 {}] compactions.Compactor(224): Compacting 9776a319c22f48eeb867f82c243066bb, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=336, earliestPutTs=1734371806694 2024-12-16T17:56:47,645 INFO [RS:0;3609ad07831c:39733-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 99318ad6c4e7b8782230d738424ff705#C#compaction#71 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-12-16T17:56:47,645 DEBUG [RS:0;3609ad07831c:39733-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/99318ad6c4e7b8782230d738424ff705/.tmp/C/bb22ff509e6241b08a1abb50d02de61e is 50, key is test_row_0/C:col10/1734371807010/Put/seqid=0 2024-12-16T17:56:47,652 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39733 {}] regionserver.HRegion(8581): Flush requested on 99318ad6c4e7b8782230d738424ff705 2024-12-16T17:56:47,653 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 99318ad6c4e7b8782230d738424ff705 3/3 column families, dataSize=127.47 KB heapSize=334.73 KB 2024-12-16T17:56:47,655 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 99318ad6c4e7b8782230d738424ff705, store=A 2024-12-16T17:56:47,655 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-16T17:56:47,655 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 99318ad6c4e7b8782230d738424ff705, store=B 2024-12-16T17:56:47,655 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-16T17:56:47,655 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 99318ad6c4e7b8782230d738424ff705, store=C 2024-12-16T17:56:47,655 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-16T17:56:47,667 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41817 is added to blk_1073741910_1086 (size=13085) 2024-12-16T17:56:47,667 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39733 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=99318ad6c4e7b8782230d738424ff705, server=3609ad07831c,39733,1734371789085 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-16T17:56:47,668 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39733 {}] ipc.CallRunner(138): callId: 155 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40284 deadline: 1734371867662, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=99318ad6c4e7b8782230d738424ff705, server=3609ad07831c,39733,1734371789085 2024-12-16T17:56:47,668 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39733 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=99318ad6c4e7b8782230d738424ff705, server=3609ad07831c,39733,1734371789085 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-16T17:56:47,668 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39733 {}] ipc.CallRunner(138): callId: 152 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40296 deadline: 1734371867663, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=99318ad6c4e7b8782230d738424ff705, server=3609ad07831c,39733,1734371789085 2024-12-16T17:56:47,668 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39733 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=99318ad6c4e7b8782230d738424ff705, server=3609ad07831c,39733,1734371789085 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-16T17:56:47,668 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39733 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=99318ad6c4e7b8782230d738424ff705, server=3609ad07831c,39733,1734371789085 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-16T17:56:47,668 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39733 {}] ipc.CallRunner(138): callId: 150 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40322 deadline: 1734371867664, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=99318ad6c4e7b8782230d738424ff705, server=3609ad07831c,39733,1734371789085 2024-12-16T17:56:47,668 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39733 {}] ipc.CallRunner(138): callId: 155 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40334 deadline: 1734371867665, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=99318ad6c4e7b8782230d738424ff705, server=3609ad07831c,39733,1734371789085 2024-12-16T17:56:47,669 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39733 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=99318ad6c4e7b8782230d738424ff705, server=3609ad07831c,39733,1734371789085 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-16T17:56:47,669 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39733 {}] ipc.CallRunner(138): callId: 159 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40298 deadline: 1734371867667, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=99318ad6c4e7b8782230d738424ff705, server=3609ad07831c,39733,1734371789085 2024-12-16T17:56:47,669 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 3609ad07831c,39733,1734371789085 2024-12-16T17:56:47,670 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=39733 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=19 2024-12-16T17:56:47,670 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-0 {event_type=RS_FLUSH_REGIONS, pid=19}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1734371794518.99318ad6c4e7b8782230d738424ff705. 2024-12-16T17:56:47,670 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-0 {event_type=RS_FLUSH_REGIONS, pid=19}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1734371794518.99318ad6c4e7b8782230d738424ff705. as already flushing 2024-12-16T17:56:47,670 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-0 {event_type=RS_FLUSH_REGIONS, pid=19}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1734371794518.99318ad6c4e7b8782230d738424ff705. 2024-12-16T17:56:47,670 ERROR [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-0 {event_type=RS_FLUSH_REGIONS, pid=19}] handler.RSProcedureHandler(58): pid=19 java.io.IOException: Unable to complete flush {ENCODED => 99318ad6c4e7b8782230d738424ff705, NAME => 'TestAcidGuarantees,,1734371794518.99318ad6c4e7b8782230d738424ff705.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-16T17:56:47,670 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/99318ad6c4e7b8782230d738424ff705/.tmp/A/fada509f355c4ffab32353b70fd9a1e8 is 50, key is test_row_0/A:col10/1734371807652/Put/seqid=0 2024-12-16T17:56:47,670 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-0 {event_type=RS_FLUSH_REGIONS, pid=19}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=19 java.io.IOException: Unable to complete flush {ENCODED => 99318ad6c4e7b8782230d738424ff705, NAME => 'TestAcidGuarantees,,1734371794518.99318ad6c4e7b8782230d738424ff705.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-16T17:56:47,671 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38367 {}] master.HMaster(4114): Remote procedure failed, pid=19 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 99318ad6c4e7b8782230d738424ff705, NAME => 'TestAcidGuarantees,,1734371794518.99318ad6c4e7b8782230d738424ff705.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 99318ad6c4e7b8782230d738424ff705, NAME => 'TestAcidGuarantees,,1734371794518.99318ad6c4e7b8782230d738424ff705.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-16T17:56:47,680 DEBUG [RS:0;3609ad07831c:39733-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/99318ad6c4e7b8782230d738424ff705/.tmp/C/bb22ff509e6241b08a1abb50d02de61e as hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/99318ad6c4e7b8782230d738424ff705/C/bb22ff509e6241b08a1abb50d02de61e 2024-12-16T17:56:47,691 INFO [RS:0;3609ad07831c:39733-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 99318ad6c4e7b8782230d738424ff705/C of 99318ad6c4e7b8782230d738424ff705 into bb22ff509e6241b08a1abb50d02de61e(size=12.8 K), total size for store is 12.8 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-12-16T17:56:47,691 DEBUG [RS:0;3609ad07831c:39733-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 99318ad6c4e7b8782230d738424ff705: 2024-12-16T17:56:47,691 INFO [RS:0;3609ad07831c:39733-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1734371794518.99318ad6c4e7b8782230d738424ff705., storeName=99318ad6c4e7b8782230d738424ff705/C, priority=13, startTime=1734371807548; duration=0sec 2024-12-16T17:56:47,691 DEBUG [RS:0;3609ad07831c:39733-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-16T17:56:47,691 DEBUG [RS:0;3609ad07831c:39733-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 99318ad6c4e7b8782230d738424ff705:C 2024-12-16T17:56:47,718 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41817 is added to blk_1073741911_1087 (size=12301) 2024-12-16T17:56:47,719 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=44.73 KB at sequenceid=361 (bloomFilter=true), to=hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/99318ad6c4e7b8782230d738424ff705/.tmp/A/fada509f355c4ffab32353b70fd9a1e8 2024-12-16T17:56:47,731 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/99318ad6c4e7b8782230d738424ff705/.tmp/B/296d2921ccbc4667ab2fdb1b8eba5cf7 is 50, key is test_row_0/B:col10/1734371807652/Put/seqid=0 2024-12-16T17:56:47,760 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41817 is added to blk_1073741912_1088 (size=12301) 2024-12-16T17:56:47,761 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=44.73 KB at sequenceid=361 (bloomFilter=true), to=hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/99318ad6c4e7b8782230d738424ff705/.tmp/B/296d2921ccbc4667ab2fdb1b8eba5cf7 2024-12-16T17:56:47,770 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/99318ad6c4e7b8782230d738424ff705/.tmp/C/f15fac5870874dd6b56b706665d8e870 is 50, key is test_row_0/C:col10/1734371807652/Put/seqid=0 2024-12-16T17:56:47,771 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39733 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=99318ad6c4e7b8782230d738424ff705, server=3609ad07831c,39733,1734371789085 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-16T17:56:47,772 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39733 {}] ipc.CallRunner(138): callId: 154 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40296 deadline: 1734371867770, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=99318ad6c4e7b8782230d738424ff705, server=3609ad07831c,39733,1734371789085 2024-12-16T17:56:47,772 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39733 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=99318ad6c4e7b8782230d738424ff705, server=3609ad07831c,39733,1734371789085 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-16T17:56:47,772 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39733 {}] ipc.CallRunner(138): callId: 152 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40322 deadline: 1734371867771, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=99318ad6c4e7b8782230d738424ff705, server=3609ad07831c,39733,1734371789085 2024-12-16T17:56:47,773 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39733 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=99318ad6c4e7b8782230d738424ff705, server=3609ad07831c,39733,1734371789085 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-16T17:56:47,773 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39733 {}] ipc.CallRunner(138): callId: 161 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40298 deadline: 1734371867771, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=99318ad6c4e7b8782230d738424ff705, server=3609ad07831c,39733,1734371789085 2024-12-16T17:56:47,773 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39733 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=99318ad6c4e7b8782230d738424ff705, server=3609ad07831c,39733,1734371789085 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-16T17:56:47,773 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39733 {}] ipc.CallRunner(138): callId: 157 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40284 deadline: 1734371867771, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=99318ad6c4e7b8782230d738424ff705, server=3609ad07831c,39733,1734371789085 2024-12-16T17:56:47,774 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39733 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=99318ad6c4e7b8782230d738424ff705, server=3609ad07831c,39733,1734371789085 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-16T17:56:47,774 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39733 {}] ipc.CallRunner(138): callId: 157 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40334 deadline: 1734371867772, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=99318ad6c4e7b8782230d738424ff705, server=3609ad07831c,39733,1734371789085 2024-12-16T17:56:47,777 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41817 is added to blk_1073741913_1089 (size=12301) 2024-12-16T17:56:47,778 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=44.73 KB at sequenceid=361 (bloomFilter=true), to=hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/99318ad6c4e7b8782230d738424ff705/.tmp/C/f15fac5870874dd6b56b706665d8e870 2024-12-16T17:56:47,786 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/99318ad6c4e7b8782230d738424ff705/.tmp/A/fada509f355c4ffab32353b70fd9a1e8 as hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/99318ad6c4e7b8782230d738424ff705/A/fada509f355c4ffab32353b70fd9a1e8 2024-12-16T17:56:47,797 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/99318ad6c4e7b8782230d738424ff705/A/fada509f355c4ffab32353b70fd9a1e8, entries=150, sequenceid=361, filesize=12.0 K 2024-12-16T17:56:47,800 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/99318ad6c4e7b8782230d738424ff705/.tmp/B/296d2921ccbc4667ab2fdb1b8eba5cf7 as hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/99318ad6c4e7b8782230d738424ff705/B/296d2921ccbc4667ab2fdb1b8eba5cf7 2024-12-16T17:56:47,809 
INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/99318ad6c4e7b8782230d738424ff705/B/296d2921ccbc4667ab2fdb1b8eba5cf7, entries=150, sequenceid=361, filesize=12.0 K 2024-12-16T17:56:47,810 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38367 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=18 2024-12-16T17:56:47,811 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/99318ad6c4e7b8782230d738424ff705/.tmp/C/f15fac5870874dd6b56b706665d8e870 as hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/99318ad6c4e7b8782230d738424ff705/C/f15fac5870874dd6b56b706665d8e870 2024-12-16T17:56:47,820 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/99318ad6c4e7b8782230d738424ff705/C/f15fac5870874dd6b56b706665d8e870, entries=150, sequenceid=361, filesize=12.0 K 2024-12-16T17:56:47,822 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~134.18 KB/137400, heapSize ~352.27 KB/360720, currentSize=67.09 KB/68700 for 99318ad6c4e7b8782230d738424ff705 in 170ms, sequenceid=361, compaction requested=false 2024-12-16T17:56:47,822 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 3609ad07831c,39733,1734371789085 2024-12-16T17:56:47,823 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=39733 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=19 2024-12-16T17:56:47,823 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 99318ad6c4e7b8782230d738424ff705: 2024-12-16T17:56:47,823 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-1 {event_type=RS_FLUSH_REGIONS, pid=19}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1734371794518.99318ad6c4e7b8782230d738424ff705. 
2024-12-16T17:56:47,823 INFO [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-1 {event_type=RS_FLUSH_REGIONS, pid=19}] regionserver.HRegion(2837): Flushing 99318ad6c4e7b8782230d738424ff705 3/3 column families, dataSize=67.09 KB heapSize=176.53 KB 2024-12-16T17:56:47,824 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-1 {event_type=RS_FLUSH_REGIONS, pid=19}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 99318ad6c4e7b8782230d738424ff705, store=A 2024-12-16T17:56:47,824 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-1 {event_type=RS_FLUSH_REGIONS, pid=19}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-16T17:56:47,824 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-1 {event_type=RS_FLUSH_REGIONS, pid=19}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 99318ad6c4e7b8782230d738424ff705, store=B 2024-12-16T17:56:47,824 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-1 {event_type=RS_FLUSH_REGIONS, pid=19}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-16T17:56:47,824 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-1 {event_type=RS_FLUSH_REGIONS, pid=19}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 99318ad6c4e7b8782230d738424ff705, store=C 2024-12-16T17:56:47,824 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-1 {event_type=RS_FLUSH_REGIONS, pid=19}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-16T17:56:47,834 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-1 {event_type=RS_FLUSH_REGIONS, pid=19}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/99318ad6c4e7b8782230d738424ff705/.tmp/A/5c98f35a9cad47ca910cffb8b6b32ed3 is 50, key is test_row_0/A:col10/1734371807664/Put/seqid=0 2024-12-16T17:56:47,846 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41817 is added to blk_1073741914_1090 (size=9857) 2024-12-16T17:56:47,846 INFO [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-1 {event_type=RS_FLUSH_REGIONS, pid=19}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=22.36 KB at sequenceid=375 (bloomFilter=true), to=hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/99318ad6c4e7b8782230d738424ff705/.tmp/A/5c98f35a9cad47ca910cffb8b6b32ed3 2024-12-16T17:56:47,856 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-1 {event_type=RS_FLUSH_REGIONS, pid=19}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/99318ad6c4e7b8782230d738424ff705/.tmp/B/3dffe07d19304867a19791a7d9183b1d is 50, key is test_row_0/B:col10/1734371807664/Put/seqid=0 2024-12-16T17:56:47,882 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41817 is added to blk_1073741915_1091 (size=9857) 2024-12-16T17:56:47,883 INFO [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-1 {event_type=RS_FLUSH_REGIONS, pid=19}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=22.36 KB at sequenceid=375 (bloomFilter=true), 
to=hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/99318ad6c4e7b8782230d738424ff705/.tmp/B/3dffe07d19304867a19791a7d9183b1d 2024-12-16T17:56:47,895 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-1 {event_type=RS_FLUSH_REGIONS, pid=19}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/99318ad6c4e7b8782230d738424ff705/.tmp/C/71bd2ce10a484919aad29c96e6169555 is 50, key is test_row_0/C:col10/1734371807664/Put/seqid=0 2024-12-16T17:56:47,918 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41817 is added to blk_1073741916_1092 (size=9857) 2024-12-16T17:56:47,919 INFO [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-1 {event_type=RS_FLUSH_REGIONS, pid=19}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=22.36 KB at sequenceid=375 (bloomFilter=true), to=hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/99318ad6c4e7b8782230d738424ff705/.tmp/C/71bd2ce10a484919aad29c96e6169555 2024-12-16T17:56:47,926 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-1 {event_type=RS_FLUSH_REGIONS, pid=19}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/99318ad6c4e7b8782230d738424ff705/.tmp/A/5c98f35a9cad47ca910cffb8b6b32ed3 as hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/99318ad6c4e7b8782230d738424ff705/A/5c98f35a9cad47ca910cffb8b6b32ed3 2024-12-16T17:56:47,934 INFO [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-1 {event_type=RS_FLUSH_REGIONS, pid=19}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/99318ad6c4e7b8782230d738424ff705/A/5c98f35a9cad47ca910cffb8b6b32ed3, entries=100, sequenceid=375, filesize=9.6 K 2024-12-16T17:56:47,936 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-1 {event_type=RS_FLUSH_REGIONS, pid=19}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/99318ad6c4e7b8782230d738424ff705/.tmp/B/3dffe07d19304867a19791a7d9183b1d as hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/99318ad6c4e7b8782230d738424ff705/B/3dffe07d19304867a19791a7d9183b1d 2024-12-16T17:56:47,947 INFO [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-1 {event_type=RS_FLUSH_REGIONS, pid=19}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/99318ad6c4e7b8782230d738424ff705/B/3dffe07d19304867a19791a7d9183b1d, entries=100, sequenceid=375, filesize=9.6 K 2024-12-16T17:56:47,949 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-1 {event_type=RS_FLUSH_REGIONS, pid=19}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/99318ad6c4e7b8782230d738424ff705/.tmp/C/71bd2ce10a484919aad29c96e6169555 as 
hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/99318ad6c4e7b8782230d738424ff705/C/71bd2ce10a484919aad29c96e6169555 2024-12-16T17:56:47,957 INFO [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-1 {event_type=RS_FLUSH_REGIONS, pid=19}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/99318ad6c4e7b8782230d738424ff705/C/71bd2ce10a484919aad29c96e6169555, entries=100, sequenceid=375, filesize=9.6 K 2024-12-16T17:56:47,959 INFO [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-1 {event_type=RS_FLUSH_REGIONS, pid=19}] regionserver.HRegion(3040): Finished flush of dataSize ~67.09 KB/68700, heapSize ~176.48 KB/180720, currentSize=0 B/0 for 99318ad6c4e7b8782230d738424ff705 in 136ms, sequenceid=375, compaction requested=true 2024-12-16T17:56:47,959 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-1 {event_type=RS_FLUSH_REGIONS, pid=19}] regionserver.HRegion(2538): Flush status journal for 99318ad6c4e7b8782230d738424ff705: 2024-12-16T17:56:47,959 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-1 {event_type=RS_FLUSH_REGIONS, pid=19}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1734371794518.99318ad6c4e7b8782230d738424ff705. 2024-12-16T17:56:47,960 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-1 {event_type=RS_FLUSH_REGIONS, pid=19}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=19 2024-12-16T17:56:47,961 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38367 {}] master.HMaster(4106): Remote procedure done, pid=19 2024-12-16T17:56:47,964 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=19, resume processing ppid=18 2024-12-16T17:56:47,965 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=19, ppid=18, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 753 msec 2024-12-16T17:56:47,968 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=18, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=18, table=TestAcidGuarantees in 760 msec 2024-12-16T17:56:47,982 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39733 {}] regionserver.HRegion(8581): Flush requested on 99318ad6c4e7b8782230d738424ff705 2024-12-16T17:56:47,982 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 99318ad6c4e7b8782230d738424ff705 3/3 column families, dataSize=53.67 KB heapSize=141.38 KB 2024-12-16T17:56:47,983 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 99318ad6c4e7b8782230d738424ff705, store=A 2024-12-16T17:56:47,983 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-16T17:56:47,983 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 99318ad6c4e7b8782230d738424ff705, store=B 2024-12-16T17:56:47,983 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-16T17:56:47,983 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 99318ad6c4e7b8782230d738424ff705, store=C 2024-12-16T17:56:47,983 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-16T17:56:47,997 DEBUG 
[MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/99318ad6c4e7b8782230d738424ff705/.tmp/A/be05da72a17e4ff786a5e04e748077e2 is 50, key is test_row_0/A:col10/1734371807980/Put/seqid=0 2024-12-16T17:56:48,006 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41817 is added to blk_1073741917_1093 (size=14741) 2024-12-16T17:56:48,023 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39733 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=99318ad6c4e7b8782230d738424ff705, server=3609ad07831c,39733,1734371789085 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-16T17:56:48,023 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39733 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=99318ad6c4e7b8782230d738424ff705, server=3609ad07831c,39733,1734371789085 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-16T17:56:48,024 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39733 {}] ipc.CallRunner(138): callId: 162 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40296 deadline: 1734371868017, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=99318ad6c4e7b8782230d738424ff705, server=3609ad07831c,39733,1734371789085 2024-12-16T17:56:48,024 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39733 {}] ipc.CallRunner(138): callId: 160 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40322 deadline: 1734371868019, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=99318ad6c4e7b8782230d738424ff705, server=3609ad07831c,39733,1734371789085 2024-12-16T17:56:48,024 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39733 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=99318ad6c4e7b8782230d738424ff705, server=3609ad07831c,39733,1734371789085 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-16T17:56:48,024 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39733 {}] ipc.CallRunner(138): callId: 169 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40298 deadline: 1734371868019, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=99318ad6c4e7b8782230d738424ff705, server=3609ad07831c,39733,1734371789085 2024-12-16T17:56:48,024 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39733 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=99318ad6c4e7b8782230d738424ff705, server=3609ad07831c,39733,1734371789085 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-16T17:56:48,025 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39733 {}] ipc.CallRunner(138): callId: 165 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40334 deadline: 1734371868021, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=99318ad6c4e7b8782230d738424ff705, server=3609ad07831c,39733,1734371789085 2024-12-16T17:56:48,025 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39733 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=99318ad6c4e7b8782230d738424ff705, server=3609ad07831c,39733,1734371789085 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-16T17:56:48,025 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39733 {}] ipc.CallRunner(138): callId: 165 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40284 deadline: 1734371868023, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=99318ad6c4e7b8782230d738424ff705, server=3609ad07831c,39733,1734371789085 2024-12-16T17:56:48,126 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39733 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=99318ad6c4e7b8782230d738424ff705, server=3609ad07831c,39733,1734371789085 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-16T17:56:48,126 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39733 {}] ipc.CallRunner(138): callId: 164 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40296 deadline: 1734371868125, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=99318ad6c4e7b8782230d738424ff705, server=3609ad07831c,39733,1734371789085 2024-12-16T17:56:48,127 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39733 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=99318ad6c4e7b8782230d738424ff705, server=3609ad07831c,39733,1734371789085 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-16T17:56:48,127 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39733 {}] ipc.CallRunner(138): callId: 162 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40322 deadline: 1734371868126, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=99318ad6c4e7b8782230d738424ff705, server=3609ad07831c,39733,1734371789085 2024-12-16T17:56:48,128 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39733 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=99318ad6c4e7b8782230d738424ff705, server=3609ad07831c,39733,1734371789085 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-16T17:56:48,128 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39733 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=99318ad6c4e7b8782230d738424ff705, server=3609ad07831c,39733,1734371789085 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-16T17:56:48,128 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39733 {}] ipc.CallRunner(138): callId: 167 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40334 deadline: 1734371868126, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=99318ad6c4e7b8782230d738424ff705, server=3609ad07831c,39733,1734371789085 2024-12-16T17:56:48,128 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39733 {}] ipc.CallRunner(138): callId: 171 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40298 deadline: 1734371868126, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=99318ad6c4e7b8782230d738424ff705, server=3609ad07831c,39733,1734371789085 2024-12-16T17:56:48,128 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39733 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=99318ad6c4e7b8782230d738424ff705, server=3609ad07831c,39733,1734371789085 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-16T17:56:48,128 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39733 {}] ipc.CallRunner(138): callId: 167 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40284 deadline: 1734371868127, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=99318ad6c4e7b8782230d738424ff705, server=3609ad07831c,39733,1734371789085 2024-12-16T17:56:48,311 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38367 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=18 2024-12-16T17:56:48,312 INFO [Thread-159 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 18 completed 2024-12-16T17:56:48,313 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38367 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-12-16T17:56:48,314 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38367 {}] procedure2.ProcedureExecutor(1098): Stored pid=20, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=20, table=TestAcidGuarantees 2024-12-16T17:56:48,315 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38367 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=20 2024-12-16T17:56:48,316 INFO [PEWorker-2 {}] procedure.FlushTableProcedure(91): pid=20, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=20, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-12-16T17:56:48,317 INFO [PEWorker-2 {}] procedure.FlushTableProcedure(91): pid=20, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=20, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-12-16T17:56:48,317 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=21, ppid=20, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-12-16T17:56:48,330 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39733 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=99318ad6c4e7b8782230d738424ff705, server=3609ad07831c,39733,1734371789085 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-16T17:56:48,330 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39733 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=99318ad6c4e7b8782230d738424ff705, server=3609ad07831c,39733,1734371789085 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-16T17:56:48,330 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39733 {}] ipc.CallRunner(138): callId: 169 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40284 deadline: 1734371868329, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=99318ad6c4e7b8782230d738424ff705, server=3609ad07831c,39733,1734371789085 2024-12-16T17:56:48,330 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39733 {}] ipc.CallRunner(138): callId: 166 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40296 deadline: 1734371868329, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=99318ad6c4e7b8782230d738424ff705, server=3609ad07831c,39733,1734371789085 2024-12-16T17:56:48,331 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39733 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=99318ad6c4e7b8782230d738424ff705, server=3609ad07831c,39733,1734371789085 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-16T17:56:48,331 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39733 {}] ipc.CallRunner(138): callId: 173 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40298 deadline: 1734371868330, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=99318ad6c4e7b8782230d738424ff705, server=3609ad07831c,39733,1734371789085 2024-12-16T17:56:48,331 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39733 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=99318ad6c4e7b8782230d738424ff705, server=3609ad07831c,39733,1734371789085 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-16T17:56:48,331 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39733 {}] ipc.CallRunner(138): callId: 164 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40322 deadline: 1734371868331, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=99318ad6c4e7b8782230d738424ff705, server=3609ad07831c,39733,1734371789085 2024-12-16T17:56:48,332 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39733 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=99318ad6c4e7b8782230d738424ff705, server=3609ad07831c,39733,1734371789085 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-16T17:56:48,332 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39733 {}] ipc.CallRunner(138): callId: 169 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40334 deadline: 1734371868331, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=99318ad6c4e7b8782230d738424ff705, server=3609ad07831c,39733,1734371789085 2024-12-16T17:56:48,407 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=17.89 KB at sequenceid=386 (bloomFilter=true), to=hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/99318ad6c4e7b8782230d738424ff705/.tmp/A/be05da72a17e4ff786a5e04e748077e2 2024-12-16T17:56:48,416 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38367 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=20 2024-12-16T17:56:48,417 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/99318ad6c4e7b8782230d738424ff705/.tmp/B/0ebd3c1330c94aef958895f152d669b5 is 50, key is test_row_0/B:col10/1734371807980/Put/seqid=0 2024-12-16T17:56:48,421 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41817 is added to blk_1073741918_1094 (size=12301) 2024-12-16T17:56:48,471 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 3609ad07831c,39733,1734371789085 2024-12-16T17:56:48,471 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=39733 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=21 2024-12-16T17:56:48,471 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-2 {event_type=RS_FLUSH_REGIONS, pid=21}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1734371794518.99318ad6c4e7b8782230d738424ff705. 
2024-12-16T17:56:48,472 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-2 {event_type=RS_FLUSH_REGIONS, pid=21}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1734371794518.99318ad6c4e7b8782230d738424ff705. as already flushing 2024-12-16T17:56:48,472 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-2 {event_type=RS_FLUSH_REGIONS, pid=21}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1734371794518.99318ad6c4e7b8782230d738424ff705. 2024-12-16T17:56:48,472 ERROR [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-2 {event_type=RS_FLUSH_REGIONS, pid=21}] handler.RSProcedureHandler(58): pid=21 java.io.IOException: Unable to complete flush {ENCODED => 99318ad6c4e7b8782230d738424ff705, NAME => 'TestAcidGuarantees,,1734371794518.99318ad6c4e7b8782230d738424ff705.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-16T17:56:48,472 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-2 {event_type=RS_FLUSH_REGIONS, pid=21}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=21 java.io.IOException: Unable to complete flush {ENCODED => 99318ad6c4e7b8782230d738424ff705, NAME => 'TestAcidGuarantees,,1734371794518.99318ad6c4e7b8782230d738424ff705.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-16T17:56:48,472 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38367 {}] master.HMaster(4114): Remote procedure failed, pid=21 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 99318ad6c4e7b8782230d738424ff705, NAME => 'TestAcidGuarantees,,1734371794518.99318ad6c4e7b8782230d738424ff705.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 99318ad6c4e7b8782230d738424ff705, NAME => 'TestAcidGuarantees,,1734371794518.99318ad6c4e7b8782230d738424ff705.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-16T17:56:48,617 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38367 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=20 2024-12-16T17:56:48,624 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 3609ad07831c,39733,1734371789085 2024-12-16T17:56:48,626 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=39733 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=21 2024-12-16T17:56:48,626 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-0 {event_type=RS_FLUSH_REGIONS, pid=21}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1734371794518.99318ad6c4e7b8782230d738424ff705. 
2024-12-16T17:56:48,626 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-0 {event_type=RS_FLUSH_REGIONS, pid=21}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1734371794518.99318ad6c4e7b8782230d738424ff705. as already flushing 2024-12-16T17:56:48,626 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-0 {event_type=RS_FLUSH_REGIONS, pid=21}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1734371794518.99318ad6c4e7b8782230d738424ff705. 2024-12-16T17:56:48,626 ERROR [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-0 {event_type=RS_FLUSH_REGIONS, pid=21}] handler.RSProcedureHandler(58): pid=21 java.io.IOException: Unable to complete flush {ENCODED => 99318ad6c4e7b8782230d738424ff705, NAME => 'TestAcidGuarantees,,1734371794518.99318ad6c4e7b8782230d738424ff705.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-16T17:56:48,626 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-0 {event_type=RS_FLUSH_REGIONS, pid=21}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=21 java.io.IOException: Unable to complete flush {ENCODED => 99318ad6c4e7b8782230d738424ff705, NAME => 'TestAcidGuarantees,,1734371794518.99318ad6c4e7b8782230d738424ff705.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-16T17:56:48,627 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38367 {}] master.HMaster(4114): Remote procedure failed, pid=21 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 99318ad6c4e7b8782230d738424ff705, NAME => 'TestAcidGuarantees,,1734371794518.99318ad6c4e7b8782230d738424ff705.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 99318ad6c4e7b8782230d738424ff705, NAME => 'TestAcidGuarantees,,1734371794518.99318ad6c4e7b8782230d738424ff705.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-16T17:56:48,633 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39733 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=99318ad6c4e7b8782230d738424ff705, server=3609ad07831c,39733,1734371789085 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-16T17:56:48,633 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39733 {}] ipc.CallRunner(138): callId: 171 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40284 deadline: 1734371868631, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=99318ad6c4e7b8782230d738424ff705, server=3609ad07831c,39733,1734371789085 2024-12-16T17:56:48,634 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39733 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=99318ad6c4e7b8782230d738424ff705, server=3609ad07831c,39733,1734371789085 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-16T17:56:48,634 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39733 {}] ipc.CallRunner(138): callId: 168 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40296 deadline: 1734371868634, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=99318ad6c4e7b8782230d738424ff705, server=3609ad07831c,39733,1734371789085 2024-12-16T17:56:48,635 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39733 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=99318ad6c4e7b8782230d738424ff705, server=3609ad07831c,39733,1734371789085 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-16T17:56:48,635 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39733 {}] ipc.CallRunner(138): callId: 175 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40298 deadline: 1734371868634, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=99318ad6c4e7b8782230d738424ff705, server=3609ad07831c,39733,1734371789085 2024-12-16T17:56:48,635 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39733 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=99318ad6c4e7b8782230d738424ff705, server=3609ad07831c,39733,1734371789085 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-16T17:56:48,635 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39733 {}] ipc.CallRunner(138): callId: 166 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40322 deadline: 1734371868634, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=99318ad6c4e7b8782230d738424ff705, server=3609ad07831c,39733,1734371789085 2024-12-16T17:56:48,645 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39733 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=99318ad6c4e7b8782230d738424ff705, server=3609ad07831c,39733,1734371789085 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-16T17:56:48,645 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39733 {}] ipc.CallRunner(138): callId: 171 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40334 deadline: 1734371868644, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=99318ad6c4e7b8782230d738424ff705, server=3609ad07831c,39733,1734371789085 2024-12-16T17:56:48,779 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 3609ad07831c,39733,1734371789085 2024-12-16T17:56:48,780 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=39733 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=21 2024-12-16T17:56:48,780 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-1 {event_type=RS_FLUSH_REGIONS, pid=21}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1734371794518.99318ad6c4e7b8782230d738424ff705. 2024-12-16T17:56:48,780 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-1 {event_type=RS_FLUSH_REGIONS, pid=21}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1734371794518.99318ad6c4e7b8782230d738424ff705. as already flushing 2024-12-16T17:56:48,780 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-1 {event_type=RS_FLUSH_REGIONS, pid=21}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1734371794518.99318ad6c4e7b8782230d738424ff705. 2024-12-16T17:56:48,780 ERROR [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-1 {event_type=RS_FLUSH_REGIONS, pid=21}] handler.RSProcedureHandler(58): pid=21 java.io.IOException: Unable to complete flush {ENCODED => 99318ad6c4e7b8782230d738424ff705, NAME => 'TestAcidGuarantees,,1734371794518.99318ad6c4e7b8782230d738424ff705.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-16T17:56:48,780 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-1 {event_type=RS_FLUSH_REGIONS, pid=21}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=21 java.io.IOException: Unable to complete flush {ENCODED => 99318ad6c4e7b8782230d738424ff705, NAME => 'TestAcidGuarantees,,1734371794518.99318ad6c4e7b8782230d738424ff705.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-16T17:56:48,781 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38367 {}] master.HMaster(4114): Remote procedure failed, pid=21 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 99318ad6c4e7b8782230d738424ff705, NAME => 'TestAcidGuarantees,,1734371794518.99318ad6c4e7b8782230d738424ff705.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 99318ad6c4e7b8782230d738424ff705, NAME => 'TestAcidGuarantees,,1734371794518.99318ad6c4e7b8782230d738424ff705.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-16T17:56:48,823 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=17.89 KB at sequenceid=386 (bloomFilter=true), to=hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/99318ad6c4e7b8782230d738424ff705/.tmp/B/0ebd3c1330c94aef958895f152d669b5 2024-12-16T17:56:48,833 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/99318ad6c4e7b8782230d738424ff705/.tmp/C/bc5e96e925f448b780c7277f72517751 is 50, key is test_row_0/C:col10/1734371807980/Put/seqid=0 2024-12-16T17:56:48,850 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41817 is added to blk_1073741919_1095 (size=12301) 2024-12-16T17:56:48,851 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=17.89 KB at sequenceid=386 (bloomFilter=true), to=hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/99318ad6c4e7b8782230d738424ff705/.tmp/C/bc5e96e925f448b780c7277f72517751 2024-12-16T17:56:48,859 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/99318ad6c4e7b8782230d738424ff705/.tmp/A/be05da72a17e4ff786a5e04e748077e2 as hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/99318ad6c4e7b8782230d738424ff705/A/be05da72a17e4ff786a5e04e748077e2 2024-12-16T17:56:48,877 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added 
hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/99318ad6c4e7b8782230d738424ff705/A/be05da72a17e4ff786a5e04e748077e2, entries=200, sequenceid=386, filesize=14.4 K 2024-12-16T17:56:48,879 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/99318ad6c4e7b8782230d738424ff705/.tmp/B/0ebd3c1330c94aef958895f152d669b5 as hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/99318ad6c4e7b8782230d738424ff705/B/0ebd3c1330c94aef958895f152d669b5 2024-12-16T17:56:48,892 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/99318ad6c4e7b8782230d738424ff705/B/0ebd3c1330c94aef958895f152d669b5, entries=150, sequenceid=386, filesize=12.0 K 2024-12-16T17:56:48,894 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/99318ad6c4e7b8782230d738424ff705/.tmp/C/bc5e96e925f448b780c7277f72517751 as hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/99318ad6c4e7b8782230d738424ff705/C/bc5e96e925f448b780c7277f72517751 2024-12-16T17:56:48,903 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/99318ad6c4e7b8782230d738424ff705/C/bc5e96e925f448b780c7277f72517751, entries=150, sequenceid=386, filesize=12.0 K 2024-12-16T17:56:48,904 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~53.67 KB/54960, heapSize ~141.33 KB/144720, currentSize=147.60 KB/151140 for 99318ad6c4e7b8782230d738424ff705 in 922ms, sequenceid=386, compaction requested=true 2024-12-16T17:56:48,904 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 99318ad6c4e7b8782230d738424ff705: 2024-12-16T17:56:48,905 DEBUG [RS:0;3609ad07831c:39733-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 4 store files, 0 compacting, 4 eligible, 16 blocking 2024-12-16T17:56:48,905 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 99318ad6c4e7b8782230d738424ff705:A, priority=-2147483648, current under compaction store size is 1 2024-12-16T17:56:48,906 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-16T17:56:48,906 DEBUG [RS:0;3609ad07831c:39733-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 4 store files, 0 compacting, 4 eligible, 16 blocking 2024-12-16T17:56:48,906 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 99318ad6c4e7b8782230d738424ff705:B, priority=-2147483648, current under compaction store size is 2 2024-12-16T17:56:48,906 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-16T17:56:48,906 DEBUG [MemStoreFlusher.0 {}] 
regionserver.CompactSplit(403): Add compact mark for store 99318ad6c4e7b8782230d738424ff705:C, priority=-2147483648, current under compaction store size is 3 2024-12-16T17:56:48,906 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-16T17:56:48,908 DEBUG [RS:0;3609ad07831c:39733-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 4 files of size 49984 starting at candidate #0 after considering 3 permutations with 3 in ratio 2024-12-16T17:56:48,908 DEBUG [RS:0;3609ad07831c:39733-shortCompactions-0 {}] regionserver.HStore(1540): 99318ad6c4e7b8782230d738424ff705/A is initiating minor compaction (all files) 2024-12-16T17:56:48,908 INFO [RS:0;3609ad07831c:39733-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 99318ad6c4e7b8782230d738424ff705/A in TestAcidGuarantees,,1734371794518.99318ad6c4e7b8782230d738424ff705. 2024-12-16T17:56:48,908 INFO [RS:0;3609ad07831c:39733-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/99318ad6c4e7b8782230d738424ff705/A/4c85751af8444d56b854664297602353, hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/99318ad6c4e7b8782230d738424ff705/A/fada509f355c4ffab32353b70fd9a1e8, hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/99318ad6c4e7b8782230d738424ff705/A/5c98f35a9cad47ca910cffb8b6b32ed3, hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/99318ad6c4e7b8782230d738424ff705/A/be05da72a17e4ff786a5e04e748077e2] into tmpdir=hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/99318ad6c4e7b8782230d738424ff705/.tmp, totalSize=48.8 K 2024-12-16T17:56:48,911 DEBUG [RS:0;3609ad07831c:39733-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 4 files of size 47544 starting at candidate #0 after considering 3 permutations with 3 in ratio 2024-12-16T17:56:48,911 DEBUG [RS:0;3609ad07831c:39733-longCompactions-0 {}] regionserver.HStore(1540): 99318ad6c4e7b8782230d738424ff705/B is initiating minor compaction (all files) 2024-12-16T17:56:48,912 INFO [RS:0;3609ad07831c:39733-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 99318ad6c4e7b8782230d738424ff705/B in TestAcidGuarantees,,1734371794518.99318ad6c4e7b8782230d738424ff705. 
2024-12-16T17:56:48,912 INFO [RS:0;3609ad07831c:39733-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/99318ad6c4e7b8782230d738424ff705/B/7886f7480dc14798b6635992b16822dc, hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/99318ad6c4e7b8782230d738424ff705/B/296d2921ccbc4667ab2fdb1b8eba5cf7, hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/99318ad6c4e7b8782230d738424ff705/B/3dffe07d19304867a19791a7d9183b1d, hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/99318ad6c4e7b8782230d738424ff705/B/0ebd3c1330c94aef958895f152d669b5] into tmpdir=hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/99318ad6c4e7b8782230d738424ff705/.tmp, totalSize=46.4 K 2024-12-16T17:56:48,913 DEBUG [RS:0;3609ad07831c:39733-longCompactions-0 {}] compactions.Compactor(224): Compacting 7886f7480dc14798b6635992b16822dc, keycount=150, bloomtype=ROW, size=12.8 K, encoding=NONE, compression=NONE, seqNum=336, earliestPutTs=1734371806694 2024-12-16T17:56:48,913 DEBUG [RS:0;3609ad07831c:39733-shortCompactions-0 {}] compactions.Compactor(224): Compacting 4c85751af8444d56b854664297602353, keycount=150, bloomtype=ROW, size=12.8 K, encoding=NONE, compression=NONE, seqNum=336, earliestPutTs=1734371806694 2024-12-16T17:56:48,913 DEBUG [RS:0;3609ad07831c:39733-longCompactions-0 {}] compactions.Compactor(224): Compacting 296d2921ccbc4667ab2fdb1b8eba5cf7, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=361, earliestPutTs=1734371807036 2024-12-16T17:56:48,914 DEBUG [RS:0;3609ad07831c:39733-shortCompactions-0 {}] compactions.Compactor(224): Compacting fada509f355c4ffab32353b70fd9a1e8, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=361, earliestPutTs=1734371807036 2024-12-16T17:56:48,914 DEBUG [RS:0;3609ad07831c:39733-longCompactions-0 {}] compactions.Compactor(224): Compacting 3dffe07d19304867a19791a7d9183b1d, keycount=100, bloomtype=ROW, size=9.6 K, encoding=NONE, compression=NONE, seqNum=375, earliestPutTs=1734371807664 2024-12-16T17:56:48,915 DEBUG [RS:0;3609ad07831c:39733-shortCompactions-0 {}] compactions.Compactor(224): Compacting 5c98f35a9cad47ca910cffb8b6b32ed3, keycount=100, bloomtype=ROW, size=9.6 K, encoding=NONE, compression=NONE, seqNum=375, earliestPutTs=1734371807664 2024-12-16T17:56:48,917 DEBUG [RS:0;3609ad07831c:39733-longCompactions-0 {}] compactions.Compactor(224): Compacting 0ebd3c1330c94aef958895f152d669b5, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=386, earliestPutTs=1734371807979 2024-12-16T17:56:48,917 DEBUG [RS:0;3609ad07831c:39733-shortCompactions-0 {}] compactions.Compactor(224): Compacting be05da72a17e4ff786a5e04e748077e2, keycount=200, bloomtype=ROW, size=14.4 K, encoding=NONE, compression=NONE, seqNum=386, earliestPutTs=1734371807979 2024-12-16T17:56:48,918 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38367 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=20 2024-12-16T17:56:48,932 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 3609ad07831c,39733,1734371789085 2024-12-16T17:56:48,933 DEBUG 
[RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=39733 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=21 2024-12-16T17:56:48,933 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-2 {event_type=RS_FLUSH_REGIONS, pid=21}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1734371794518.99318ad6c4e7b8782230d738424ff705. 2024-12-16T17:56:48,933 INFO [RS:0;3609ad07831c:39733-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 99318ad6c4e7b8782230d738424ff705#B#compaction#81 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-12-16T17:56:48,933 INFO [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-2 {event_type=RS_FLUSH_REGIONS, pid=21}] regionserver.HRegion(2837): Flushing 99318ad6c4e7b8782230d738424ff705 3/3 column families, dataSize=147.60 KB heapSize=387.47 KB 2024-12-16T17:56:48,934 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-2 {event_type=RS_FLUSH_REGIONS, pid=21}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 99318ad6c4e7b8782230d738424ff705, store=A 2024-12-16T17:56:48,934 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-2 {event_type=RS_FLUSH_REGIONS, pid=21}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-16T17:56:48,934 DEBUG [RS:0;3609ad07831c:39733-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/99318ad6c4e7b8782230d738424ff705/.tmp/B/a4be1aa592074b17ae8f88edcde284ed is 50, key is test_row_0/B:col10/1734371807980/Put/seqid=0 2024-12-16T17:56:48,934 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-2 {event_type=RS_FLUSH_REGIONS, pid=21}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 99318ad6c4e7b8782230d738424ff705, store=B 2024-12-16T17:56:48,934 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-2 {event_type=RS_FLUSH_REGIONS, pid=21}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-16T17:56:48,934 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-2 {event_type=RS_FLUSH_REGIONS, pid=21}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 99318ad6c4e7b8782230d738424ff705, store=C 2024-12-16T17:56:48,934 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-2 {event_type=RS_FLUSH_REGIONS, pid=21}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-16T17:56:48,946 INFO [RS:0;3609ad07831c:39733-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 99318ad6c4e7b8782230d738424ff705#A#compaction#82 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-12-16T17:56:48,948 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-2 {event_type=RS_FLUSH_REGIONS, pid=21}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/99318ad6c4e7b8782230d738424ff705/.tmp/A/a3ff45c3cd0c4e189b52ac603787ea6b is 50, key is test_row_0/A:col10/1734371808011/Put/seqid=0 2024-12-16T17:56:48,949 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41817 is added to blk_1073741920_1096 (size=13221) 2024-12-16T17:56:48,952 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41817 is added to blk_1073741921_1097 (size=12301) 2024-12-16T17:56:48,957 DEBUG [RS:0;3609ad07831c:39733-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/99318ad6c4e7b8782230d738424ff705/.tmp/A/8d17e87f224e4a4092f9ae0e22bc5d28 is 50, key is test_row_0/A:col10/1734371807980/Put/seqid=0 2024-12-16T17:56:48,957 INFO [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-2 {event_type=RS_FLUSH_REGIONS, pid=21}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=49.20 KB at sequenceid=411 (bloomFilter=true), to=hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/99318ad6c4e7b8782230d738424ff705/.tmp/A/a3ff45c3cd0c4e189b52ac603787ea6b 2024-12-16T17:56:48,965 DEBUG [RS:0;3609ad07831c:39733-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/99318ad6c4e7b8782230d738424ff705/.tmp/B/a4be1aa592074b17ae8f88edcde284ed as hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/99318ad6c4e7b8782230d738424ff705/B/a4be1aa592074b17ae8f88edcde284ed 2024-12-16T17:56:48,973 INFO [RS:0;3609ad07831c:39733-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 4 (all) file(s) in 99318ad6c4e7b8782230d738424ff705/B of 99318ad6c4e7b8782230d738424ff705 into a4be1aa592074b17ae8f88edcde284ed(size=12.9 K), total size for store is 12.9 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-12-16T17:56:48,973 DEBUG [RS:0;3609ad07831c:39733-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 99318ad6c4e7b8782230d738424ff705: 2024-12-16T17:56:48,973 INFO [RS:0;3609ad07831c:39733-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1734371794518.99318ad6c4e7b8782230d738424ff705., storeName=99318ad6c4e7b8782230d738424ff705/B, priority=12, startTime=1734371808906; duration=0sec 2024-12-16T17:56:48,974 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41817 is added to blk_1073741922_1098 (size=13221) 2024-12-16T17:56:48,974 DEBUG [RS:0;3609ad07831c:39733-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-16T17:56:48,974 DEBUG [RS:0;3609ad07831c:39733-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 99318ad6c4e7b8782230d738424ff705:B 2024-12-16T17:56:48,975 DEBUG [RS:0;3609ad07831c:39733-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 4 store files, 0 compacting, 4 eligible, 16 blocking 2024-12-16T17:56:48,976 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-2 {event_type=RS_FLUSH_REGIONS, pid=21}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/99318ad6c4e7b8782230d738424ff705/.tmp/B/3dcbace6c66f4bcd8e6088f429ee302b is 50, key is test_row_0/B:col10/1734371808011/Put/seqid=0 2024-12-16T17:56:48,985 DEBUG [RS:0;3609ad07831c:39733-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/99318ad6c4e7b8782230d738424ff705/.tmp/A/8d17e87f224e4a4092f9ae0e22bc5d28 as hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/99318ad6c4e7b8782230d738424ff705/A/8d17e87f224e4a4092f9ae0e22bc5d28 2024-12-16T17:56:48,986 DEBUG [RS:0;3609ad07831c:39733-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 4 files of size 47544 starting at candidate #0 after considering 3 permutations with 3 in ratio 2024-12-16T17:56:48,987 DEBUG [RS:0;3609ad07831c:39733-longCompactions-0 {}] regionserver.HStore(1540): 99318ad6c4e7b8782230d738424ff705/C is initiating minor compaction (all files) 2024-12-16T17:56:48,987 INFO [RS:0;3609ad07831c:39733-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 99318ad6c4e7b8782230d738424ff705/C in TestAcidGuarantees,,1734371794518.99318ad6c4e7b8782230d738424ff705. 
2024-12-16T17:56:48,987 INFO [RS:0;3609ad07831c:39733-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/99318ad6c4e7b8782230d738424ff705/C/bb22ff509e6241b08a1abb50d02de61e, hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/99318ad6c4e7b8782230d738424ff705/C/f15fac5870874dd6b56b706665d8e870, hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/99318ad6c4e7b8782230d738424ff705/C/71bd2ce10a484919aad29c96e6169555, hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/99318ad6c4e7b8782230d738424ff705/C/bc5e96e925f448b780c7277f72517751] into tmpdir=hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/99318ad6c4e7b8782230d738424ff705/.tmp, totalSize=46.4 K 2024-12-16T17:56:48,988 DEBUG [RS:0;3609ad07831c:39733-longCompactions-0 {}] compactions.Compactor(224): Compacting bb22ff509e6241b08a1abb50d02de61e, keycount=150, bloomtype=ROW, size=12.8 K, encoding=NONE, compression=NONE, seqNum=336, earliestPutTs=1734371806694 2024-12-16T17:56:48,989 DEBUG [RS:0;3609ad07831c:39733-longCompactions-0 {}] compactions.Compactor(224): Compacting f15fac5870874dd6b56b706665d8e870, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=361, earliestPutTs=1734371807036 2024-12-16T17:56:48,993 DEBUG [RS:0;3609ad07831c:39733-longCompactions-0 {}] compactions.Compactor(224): Compacting 71bd2ce10a484919aad29c96e6169555, keycount=100, bloomtype=ROW, size=9.6 K, encoding=NONE, compression=NONE, seqNum=375, earliestPutTs=1734371807664 2024-12-16T17:56:48,994 DEBUG [RS:0;3609ad07831c:39733-longCompactions-0 {}] compactions.Compactor(224): Compacting bc5e96e925f448b780c7277f72517751, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=386, earliestPutTs=1734371807979 2024-12-16T17:56:48,997 INFO [RS:0;3609ad07831c:39733-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 4 (all) file(s) in 99318ad6c4e7b8782230d738424ff705/A of 99318ad6c4e7b8782230d738424ff705 into 8d17e87f224e4a4092f9ae0e22bc5d28(size=12.9 K), total size for store is 12.9 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-12-16T17:56:48,997 DEBUG [RS:0;3609ad07831c:39733-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 99318ad6c4e7b8782230d738424ff705: 2024-12-16T17:56:48,998 INFO [RS:0;3609ad07831c:39733-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1734371794518.99318ad6c4e7b8782230d738424ff705., storeName=99318ad6c4e7b8782230d738424ff705/A, priority=12, startTime=1734371808905; duration=0sec 2024-12-16T17:56:48,998 DEBUG [RS:0;3609ad07831c:39733-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-16T17:56:48,998 DEBUG [RS:0;3609ad07831c:39733-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 99318ad6c4e7b8782230d738424ff705:A 2024-12-16T17:56:49,015 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41817 is added to blk_1073741923_1099 (size=12301) 2024-12-16T17:56:49,031 INFO [RS:0;3609ad07831c:39733-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 99318ad6c4e7b8782230d738424ff705#C#compaction#85 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-12-16T17:56:49,032 DEBUG [RS:0;3609ad07831c:39733-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/99318ad6c4e7b8782230d738424ff705/.tmp/C/02de04e6205c4b1f9b4a2f967bda7692 is 50, key is test_row_0/C:col10/1734371807980/Put/seqid=0 2024-12-16T17:56:49,040 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41817 is added to blk_1073741924_1100 (size=13221) 2024-12-16T17:56:49,049 DEBUG [RS:0;3609ad07831c:39733-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/99318ad6c4e7b8782230d738424ff705/.tmp/C/02de04e6205c4b1f9b4a2f967bda7692 as hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/99318ad6c4e7b8782230d738424ff705/C/02de04e6205c4b1f9b4a2f967bda7692 2024-12-16T17:56:49,062 INFO [RS:0;3609ad07831c:39733-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 4 (all) file(s) in 99318ad6c4e7b8782230d738424ff705/C of 99318ad6c4e7b8782230d738424ff705 into 02de04e6205c4b1f9b4a2f967bda7692(size=12.9 K), total size for store is 12.9 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-12-16T17:56:49,062 DEBUG [RS:0;3609ad07831c:39733-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 99318ad6c4e7b8782230d738424ff705: 2024-12-16T17:56:49,062 INFO [RS:0;3609ad07831c:39733-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1734371794518.99318ad6c4e7b8782230d738424ff705., storeName=99318ad6c4e7b8782230d738424ff705/C, priority=12, startTime=1734371808906; duration=0sec 2024-12-16T17:56:49,062 DEBUG [RS:0;3609ad07831c:39733-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-16T17:56:49,062 DEBUG [RS:0;3609ad07831c:39733-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 99318ad6c4e7b8782230d738424ff705:C 2024-12-16T17:56:49,142 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1734371794518.99318ad6c4e7b8782230d738424ff705. as already flushing 2024-12-16T17:56:49,142 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39733 {}] regionserver.HRegion(8581): Flush requested on 99318ad6c4e7b8782230d738424ff705 2024-12-16T17:56:49,151 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39733 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=99318ad6c4e7b8782230d738424ff705, server=3609ad07831c,39733,1734371789085 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-16T17:56:49,151 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39733 {}] ipc.CallRunner(138): callId: 170 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40322 deadline: 1734371869149, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=99318ad6c4e7b8782230d738424ff705, server=3609ad07831c,39733,1734371789085 2024-12-16T17:56:49,152 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39733 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=99318ad6c4e7b8782230d738424ff705, server=3609ad07831c,39733,1734371789085 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-16T17:56:49,152 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39733 {}] ipc.CallRunner(138): callId: 175 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40284 deadline: 1734371869149, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=99318ad6c4e7b8782230d738424ff705, server=3609ad07831c,39733,1734371789085 2024-12-16T17:56:49,152 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39733 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=99318ad6c4e7b8782230d738424ff705, server=3609ad07831c,39733,1734371789085 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-16T17:56:49,153 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39733 {}] ipc.CallRunner(138): callId: 173 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40334 deadline: 1734371869149, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=99318ad6c4e7b8782230d738424ff705, server=3609ad07831c,39733,1734371789085 2024-12-16T17:56:49,153 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39733 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=99318ad6c4e7b8782230d738424ff705, server=3609ad07831c,39733,1734371789085 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-16T17:56:49,153 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39733 {}] ipc.CallRunner(138): callId: 172 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40296 deadline: 1734371869150, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=99318ad6c4e7b8782230d738424ff705, server=3609ad07831c,39733,1734371789085 2024-12-16T17:56:49,153 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39733 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=99318ad6c4e7b8782230d738424ff705, server=3609ad07831c,39733,1734371789085 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-16T17:56:49,153 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39733 {}] ipc.CallRunner(138): callId: 180 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40298 deadline: 1734371869150, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=99318ad6c4e7b8782230d738424ff705, server=3609ad07831c,39733,1734371789085 2024-12-16T17:56:49,253 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39733 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=99318ad6c4e7b8782230d738424ff705, server=3609ad07831c,39733,1734371789085 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-16T17:56:49,254 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39733 {}] ipc.CallRunner(138): callId: 172 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40322 deadline: 1734371869252, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=99318ad6c4e7b8782230d738424ff705, server=3609ad07831c,39733,1734371789085 2024-12-16T17:56:49,255 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39733 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=99318ad6c4e7b8782230d738424ff705, server=3609ad07831c,39733,1734371789085 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-16T17:56:49,255 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39733 {}] ipc.CallRunner(138): callId: 177 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40284 deadline: 1734371869253, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=99318ad6c4e7b8782230d738424ff705, server=3609ad07831c,39733,1734371789085 2024-12-16T17:56:49,255 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39733 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=99318ad6c4e7b8782230d738424ff705, server=3609ad07831c,39733,1734371789085 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-16T17:56:49,255 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39733 {}] ipc.CallRunner(138): callId: 182 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40298 deadline: 1734371869255, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=99318ad6c4e7b8782230d738424ff705, server=3609ad07831c,39733,1734371789085 2024-12-16T17:56:49,258 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39733 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=99318ad6c4e7b8782230d738424ff705, server=3609ad07831c,39733,1734371789085 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-16T17:56:49,259 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39733 {}] ipc.CallRunner(138): callId: 174 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40296 deadline: 1734371869257, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=99318ad6c4e7b8782230d738424ff705, server=3609ad07831c,39733,1734371789085 2024-12-16T17:56:49,415 INFO [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-2 {event_type=RS_FLUSH_REGIONS, pid=21}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=49.20 KB at sequenceid=411 (bloomFilter=true), to=hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/99318ad6c4e7b8782230d738424ff705/.tmp/B/3dcbace6c66f4bcd8e6088f429ee302b 2024-12-16T17:56:49,419 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38367 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=20 2024-12-16T17:56:49,427 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-2 {event_type=RS_FLUSH_REGIONS, pid=21}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/99318ad6c4e7b8782230d738424ff705/.tmp/C/00f360fab86549949af0bdd2e12ad8ee is 50, key is test_row_0/C:col10/1734371808011/Put/seqid=0 2024-12-16T17:56:49,436 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41817 is added to blk_1073741925_1101 (size=12301) 2024-12-16T17:56:49,439 INFO [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-2 {event_type=RS_FLUSH_REGIONS, pid=21}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=49.20 KB at sequenceid=411 (bloomFilter=true), to=hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/99318ad6c4e7b8782230d738424ff705/.tmp/C/00f360fab86549949af0bdd2e12ad8ee 2024-12-16T17:56:49,450 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-2 {event_type=RS_FLUSH_REGIONS, pid=21}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/99318ad6c4e7b8782230d738424ff705/.tmp/A/a3ff45c3cd0c4e189b52ac603787ea6b as hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/99318ad6c4e7b8782230d738424ff705/A/a3ff45c3cd0c4e189b52ac603787ea6b 2024-12-16T17:56:49,457 INFO [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-2 {event_type=RS_FLUSH_REGIONS, pid=21}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/99318ad6c4e7b8782230d738424ff705/A/a3ff45c3cd0c4e189b52ac603787ea6b, entries=150, sequenceid=411, filesize=12.0 K 2024-12-16T17:56:49,458 WARN 
[RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39733 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=99318ad6c4e7b8782230d738424ff705, server=3609ad07831c,39733,1734371789085 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-16T17:56:49,458 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39733 {}] ipc.CallRunner(138): callId: 174 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40322 deadline: 1734371869456, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=99318ad6c4e7b8782230d738424ff705, server=3609ad07831c,39733,1734371789085 2024-12-16T17:56:49,458 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-2 {event_type=RS_FLUSH_REGIONS, pid=21}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/99318ad6c4e7b8782230d738424ff705/.tmp/B/3dcbace6c66f4bcd8e6088f429ee302b as hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/99318ad6c4e7b8782230d738424ff705/B/3dcbace6c66f4bcd8e6088f429ee302b 2024-12-16T17:56:49,458 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39733 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=99318ad6c4e7b8782230d738424ff705, server=3609ad07831c,39733,1734371789085 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-16T17:56:49,459 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39733 {}] ipc.CallRunner(138): callId: 184 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40298 deadline: 1734371869457, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=99318ad6c4e7b8782230d738424ff705, server=3609ad07831c,39733,1734371789085 2024-12-16T17:56:49,461 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39733 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=99318ad6c4e7b8782230d738424ff705, server=3609ad07831c,39733,1734371789085 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-16T17:56:49,462 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39733 {}] ipc.CallRunner(138): callId: 179 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40284 deadline: 1734371869457, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=99318ad6c4e7b8782230d738424ff705, server=3609ad07831c,39733,1734371789085 2024-12-16T17:56:49,462 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39733 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=99318ad6c4e7b8782230d738424ff705, server=3609ad07831c,39733,1734371789085 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-16T17:56:49,463 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39733 {}] ipc.CallRunner(138): callId: 176 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40296 deadline: 1734371869460, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=99318ad6c4e7b8782230d738424ff705, server=3609ad07831c,39733,1734371789085 2024-12-16T17:56:49,469 INFO [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-2 {event_type=RS_FLUSH_REGIONS, pid=21}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/99318ad6c4e7b8782230d738424ff705/B/3dcbace6c66f4bcd8e6088f429ee302b, entries=150, sequenceid=411, filesize=12.0 K 2024-12-16T17:56:49,471 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-2 {event_type=RS_FLUSH_REGIONS, pid=21}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/99318ad6c4e7b8782230d738424ff705/.tmp/C/00f360fab86549949af0bdd2e12ad8ee as hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/99318ad6c4e7b8782230d738424ff705/C/00f360fab86549949af0bdd2e12ad8ee 2024-12-16T17:56:49,478 INFO [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-2 {event_type=RS_FLUSH_REGIONS, pid=21}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/99318ad6c4e7b8782230d738424ff705/C/00f360fab86549949af0bdd2e12ad8ee, entries=150, sequenceid=411, filesize=12.0 K 2024-12-16T17:56:49,479 INFO [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-2 {event_type=RS_FLUSH_REGIONS, pid=21}] regionserver.HRegion(3040): Finished flush of dataSize ~147.60 KB/151140, heapSize ~387.42 KB/396720, currentSize=60.38 KB/61830 for 99318ad6c4e7b8782230d738424ff705 in 546ms, sequenceid=411, compaction requested=false 2024-12-16T17:56:49,480 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-2 {event_type=RS_FLUSH_REGIONS, pid=21}] regionserver.HRegion(2538): Flush status journal for 99318ad6c4e7b8782230d738424ff705: 2024-12-16T17:56:49,480 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-2 {event_type=RS_FLUSH_REGIONS, pid=21}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1734371794518.99318ad6c4e7b8782230d738424ff705. 
2024-12-16T17:56:49,480 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-2 {event_type=RS_FLUSH_REGIONS, pid=21}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=21 2024-12-16T17:56:49,480 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38367 {}] master.HMaster(4106): Remote procedure done, pid=21 2024-12-16T17:56:49,483 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=21, resume processing ppid=20 2024-12-16T17:56:49,483 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=21, ppid=20, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 1.1640 sec 2024-12-16T17:56:49,485 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=20, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=20, table=TestAcidGuarantees in 1.1710 sec 2024-12-16T17:56:49,762 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39733 {}] regionserver.HRegion(8581): Flush requested on 99318ad6c4e7b8782230d738424ff705 2024-12-16T17:56:49,762 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 99318ad6c4e7b8782230d738424ff705 3/3 column families, dataSize=67.09 KB heapSize=176.53 KB 2024-12-16T17:56:49,763 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 99318ad6c4e7b8782230d738424ff705, store=A 2024-12-16T17:56:49,763 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-16T17:56:49,763 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 99318ad6c4e7b8782230d738424ff705, store=B 2024-12-16T17:56:49,763 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-16T17:56:49,763 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 99318ad6c4e7b8782230d738424ff705, store=C 2024-12-16T17:56:49,763 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-16T17:56:49,768 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/99318ad6c4e7b8782230d738424ff705/.tmp/A/08312614169549859a5fb176611e45fe is 50, key is test_row_0/A:col10/1734371809762/Put/seqid=0 2024-12-16T17:56:49,775 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41817 is added to blk_1073741926_1102 (size=14741) 2024-12-16T17:56:49,810 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39733 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=99318ad6c4e7b8782230d738424ff705, server=3609ad07831c,39733,1734371789085 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-16T17:56:49,810 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39733 {}] ipc.CallRunner(138): callId: 183 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40296 deadline: 1734371869806, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=99318ad6c4e7b8782230d738424ff705, server=3609ad07831c,39733,1734371789085 2024-12-16T17:56:49,810 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39733 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=99318ad6c4e7b8782230d738424ff705, server=3609ad07831c,39733,1734371789085 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-16T17:56:49,811 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39733 {}] ipc.CallRunner(138): callId: 182 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40322 deadline: 1734371869806, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=99318ad6c4e7b8782230d738424ff705, server=3609ad07831c,39733,1734371789085 2024-12-16T17:56:49,811 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39733 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=99318ad6c4e7b8782230d738424ff705, server=3609ad07831c,39733,1734371789085 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-16T17:56:49,811 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39733 {}] ipc.CallRunner(138): callId: 185 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40284 deadline: 1734371869806, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=99318ad6c4e7b8782230d738424ff705, server=3609ad07831c,39733,1734371789085 2024-12-16T17:56:49,811 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39733 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=99318ad6c4e7b8782230d738424ff705, server=3609ad07831c,39733,1734371789085 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-16T17:56:49,811 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39733 {}] ipc.CallRunner(138): callId: 192 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40298 deadline: 1734371869808, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=99318ad6c4e7b8782230d738424ff705, server=3609ad07831c,39733,1734371789085 2024-12-16T17:56:49,912 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39733 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=99318ad6c4e7b8782230d738424ff705, server=3609ad07831c,39733,1734371789085 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-16T17:56:49,912 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39733 {}] ipc.CallRunner(138): callId: 185 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40296 deadline: 1734371869911, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=99318ad6c4e7b8782230d738424ff705, server=3609ad07831c,39733,1734371789085 2024-12-16T17:56:49,912 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39733 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=99318ad6c4e7b8782230d738424ff705, server=3609ad07831c,39733,1734371789085 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-16T17:56:49,913 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39733 {}] ipc.CallRunner(138): callId: 184 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40322 deadline: 1734371869911, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=99318ad6c4e7b8782230d738424ff705, server=3609ad07831c,39733,1734371789085 2024-12-16T17:56:49,913 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39733 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=99318ad6c4e7b8782230d738424ff705, server=3609ad07831c,39733,1734371789085 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-16T17:56:49,913 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39733 {}] ipc.CallRunner(138): callId: 187 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40284 deadline: 1734371869912, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=99318ad6c4e7b8782230d738424ff705, server=3609ad07831c,39733,1734371789085 2024-12-16T17:56:49,913 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39733 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=99318ad6c4e7b8782230d738424ff705, server=3609ad07831c,39733,1734371789085 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-16T17:56:49,913 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39733 {}] ipc.CallRunner(138): callId: 194 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40298 deadline: 1734371869912, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=99318ad6c4e7b8782230d738424ff705, server=3609ad07831c,39733,1734371789085 2024-12-16T17:56:50,115 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39733 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=99318ad6c4e7b8782230d738424ff705, server=3609ad07831c,39733,1734371789085 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-16T17:56:50,115 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39733 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=99318ad6c4e7b8782230d738424ff705, server=3609ad07831c,39733,1734371789085 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-16T17:56:50,115 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39733 {}] ipc.CallRunner(138): callId: 186 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40322 deadline: 1734371870114, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=99318ad6c4e7b8782230d738424ff705, server=3609ad07831c,39733,1734371789085 2024-12-16T17:56:50,115 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39733 {}] ipc.CallRunner(138): callId: 189 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40284 deadline: 1734371870114, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=99318ad6c4e7b8782230d738424ff705, server=3609ad07831c,39733,1734371789085 2024-12-16T17:56:50,116 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39733 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=99318ad6c4e7b8782230d738424ff705, server=3609ad07831c,39733,1734371789085 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-16T17:56:50,116 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39733 {}] ipc.CallRunner(138): callId: 196 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40298 deadline: 1734371870115, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=99318ad6c4e7b8782230d738424ff705, server=3609ad07831c,39733,1734371789085 2024-12-16T17:56:50,116 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39733 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=99318ad6c4e7b8782230d738424ff705, server=3609ad07831c,39733,1734371789085 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-16T17:56:50,116 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39733 {}] ipc.CallRunner(138): callId: 187 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40296 deadline: 1734371870115, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=99318ad6c4e7b8782230d738424ff705, server=3609ad07831c,39733,1734371789085 2024-12-16T17:56:50,155 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39733 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=99318ad6c4e7b8782230d738424ff705, server=3609ad07831c,39733,1734371789085 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-16T17:56:50,155 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39733 {}] ipc.CallRunner(138): callId: 175 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40334 deadline: 1734371870155, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=99318ad6c4e7b8782230d738424ff705, server=3609ad07831c,39733,1734371789085 2024-12-16T17:56:50,176 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=24.60 KB at sequenceid=428 (bloomFilter=true), to=hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/99318ad6c4e7b8782230d738424ff705/.tmp/A/08312614169549859a5fb176611e45fe 2024-12-16T17:56:50,188 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/99318ad6c4e7b8782230d738424ff705/.tmp/B/bc9b525da1334cd08017fb4b69e7c90b is 50, key is test_row_0/B:col10/1734371809762/Put/seqid=0 2024-12-16T17:56:50,210 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41817 is added to blk_1073741927_1103 (size=12301) 2024-12-16T17:56:50,211 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=24.60 KB at sequenceid=428 (bloomFilter=true), to=hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/99318ad6c4e7b8782230d738424ff705/.tmp/B/bc9b525da1334cd08017fb4b69e7c90b 2024-12-16T17:56:50,225 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/99318ad6c4e7b8782230d738424ff705/.tmp/C/9ee9df5543da4795923e238b8e4424a0 is 50, key is test_row_0/C:col10/1734371809762/Put/seqid=0 2024-12-16T17:56:50,229 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41817 is added to blk_1073741928_1104 (size=12301) 2024-12-16T17:56:50,418 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39733 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=99318ad6c4e7b8782230d738424ff705, server=3609ad07831c,39733,1734371789085 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-16T17:56:50,418 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39733 {}] ipc.CallRunner(138): callId: 191 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40284 deadline: 1734371870416, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=99318ad6c4e7b8782230d738424ff705, server=3609ad07831c,39733,1734371789085 2024-12-16T17:56:50,418 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39733 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=99318ad6c4e7b8782230d738424ff705, server=3609ad07831c,39733,1734371789085 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-16T17:56:50,418 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39733 {}] ipc.CallRunner(138): callId: 189 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40296 deadline: 1734371870417, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=99318ad6c4e7b8782230d738424ff705, server=3609ad07831c,39733,1734371789085 2024-12-16T17:56:50,420 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38367 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=20 2024-12-16T17:56:50,420 INFO [Thread-159 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 20 completed 2024-12-16T17:56:50,420 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39733 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=99318ad6c4e7b8782230d738424ff705, server=3609ad07831c,39733,1734371789085 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-16T17:56:50,420 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39733 {}] ipc.CallRunner(138): callId: 188 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40322 deadline: 1734371870418, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=99318ad6c4e7b8782230d738424ff705, server=3609ad07831c,39733,1734371789085 2024-12-16T17:56:50,420 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39733 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=99318ad6c4e7b8782230d738424ff705, server=3609ad07831c,39733,1734371789085 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-16T17:56:50,420 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39733 {}] ipc.CallRunner(138): callId: 198 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40298 deadline: 1734371870419, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=99318ad6c4e7b8782230d738424ff705, server=3609ad07831c,39733,1734371789085 2024-12-16T17:56:50,421 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38367 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-12-16T17:56:50,422 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38367 {}] procedure2.ProcedureExecutor(1098): Stored pid=22, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=22, table=TestAcidGuarantees 2024-12-16T17:56:50,422 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38367 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=22 2024-12-16T17:56:50,423 INFO [PEWorker-3 {}] procedure.FlushTableProcedure(91): pid=22, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=22, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-12-16T17:56:50,423 INFO [PEWorker-3 {}] procedure.FlushTableProcedure(91): pid=22, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=22, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-12-16T17:56:50,424 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=23, ppid=22, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-12-16T17:56:50,524 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38367 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=22 2024-12-16T17:56:50,575 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 3609ad07831c,39733,1734371789085 2024-12-16T17:56:50,576 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=39733 {}] regionserver.RSRpcServices(3992): Executing remote procedure class 
org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=23 2024-12-16T17:56:50,576 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-0 {event_type=RS_FLUSH_REGIONS, pid=23}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1734371794518.99318ad6c4e7b8782230d738424ff705. 2024-12-16T17:56:50,576 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-0 {event_type=RS_FLUSH_REGIONS, pid=23}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1734371794518.99318ad6c4e7b8782230d738424ff705. as already flushing 2024-12-16T17:56:50,576 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-0 {event_type=RS_FLUSH_REGIONS, pid=23}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1734371794518.99318ad6c4e7b8782230d738424ff705. 2024-12-16T17:56:50,576 ERROR [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-0 {event_type=RS_FLUSH_REGIONS, pid=23}] handler.RSProcedureHandler(58): pid=23 java.io.IOException: Unable to complete flush {ENCODED => 99318ad6c4e7b8782230d738424ff705, NAME => 'TestAcidGuarantees,,1734371794518.99318ad6c4e7b8782230d738424ff705.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-16T17:56:50,576 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-0 {event_type=RS_FLUSH_REGIONS, pid=23}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=23 java.io.IOException: Unable to complete flush {ENCODED => 99318ad6c4e7b8782230d738424ff705, NAME => 'TestAcidGuarantees,,1734371794518.99318ad6c4e7b8782230d738424ff705.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-16T17:56:50,577 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38367 {}] master.HMaster(4114): Remote procedure failed, pid=23 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 99318ad6c4e7b8782230d738424ff705, NAME => 'TestAcidGuarantees,,1734371794518.99318ad6c4e7b8782230d738424ff705.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 99318ad6c4e7b8782230d738424ff705, NAME => 'TestAcidGuarantees,,1734371794518.99318ad6c4e7b8782230d738424ff705.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-16T17:56:50,630 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=24.60 KB at sequenceid=428 (bloomFilter=true), to=hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/99318ad6c4e7b8782230d738424ff705/.tmp/C/9ee9df5543da4795923e238b8e4424a0 2024-12-16T17:56:50,637 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/99318ad6c4e7b8782230d738424ff705/.tmp/A/08312614169549859a5fb176611e45fe as hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/99318ad6c4e7b8782230d738424ff705/A/08312614169549859a5fb176611e45fe 2024-12-16T17:56:50,644 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/99318ad6c4e7b8782230d738424ff705/A/08312614169549859a5fb176611e45fe, entries=200, sequenceid=428, filesize=14.4 K 2024-12-16T17:56:50,646 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/99318ad6c4e7b8782230d738424ff705/.tmp/B/bc9b525da1334cd08017fb4b69e7c90b as hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/99318ad6c4e7b8782230d738424ff705/B/bc9b525da1334cd08017fb4b69e7c90b 2024-12-16T17:56:50,653 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/99318ad6c4e7b8782230d738424ff705/B/bc9b525da1334cd08017fb4b69e7c90b, entries=150, sequenceid=428, filesize=12.0 K 2024-12-16T17:56:50,657 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/99318ad6c4e7b8782230d738424ff705/.tmp/C/9ee9df5543da4795923e238b8e4424a0 as hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/99318ad6c4e7b8782230d738424ff705/C/9ee9df5543da4795923e238b8e4424a0 2024-12-16T17:56:50,663 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/99318ad6c4e7b8782230d738424ff705/C/9ee9df5543da4795923e238b8e4424a0, entries=150, sequenceid=428, filesize=12.0 K 2024-12-16T17:56:50,664 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~73.80 KB/75570, heapSize ~194.06 KB/198720, currentSize=127.47 KB/130530 for 99318ad6c4e7b8782230d738424ff705 in 902ms, sequenceid=428, compaction requested=true 2024-12-16T17:56:50,664 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 99318ad6c4e7b8782230d738424ff705: 2024-12-16T17:56:50,664 DEBUG [RS:0;3609ad07831c:39733-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-16T17:56:50,665 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 99318ad6c4e7b8782230d738424ff705:A, priority=-2147483648, 
current under compaction store size is 1 2024-12-16T17:56:50,665 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-16T17:56:50,665 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 99318ad6c4e7b8782230d738424ff705:B, priority=-2147483648, current under compaction store size is 2 2024-12-16T17:56:50,665 DEBUG [RS:0;3609ad07831c:39733-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-16T17:56:50,665 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-16T17:56:50,665 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 99318ad6c4e7b8782230d738424ff705:C, priority=-2147483648, current under compaction store size is 3 2024-12-16T17:56:50,665 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-16T17:56:50,666 DEBUG [RS:0;3609ad07831c:39733-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 40263 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-16T17:56:50,666 DEBUG [RS:0;3609ad07831c:39733-shortCompactions-0 {}] regionserver.HStore(1540): 99318ad6c4e7b8782230d738424ff705/A is initiating minor compaction (all files) 2024-12-16T17:56:50,666 INFO [RS:0;3609ad07831c:39733-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 99318ad6c4e7b8782230d738424ff705/A in TestAcidGuarantees,,1734371794518.99318ad6c4e7b8782230d738424ff705. 2024-12-16T17:56:50,667 INFO [RS:0;3609ad07831c:39733-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/99318ad6c4e7b8782230d738424ff705/A/8d17e87f224e4a4092f9ae0e22bc5d28, hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/99318ad6c4e7b8782230d738424ff705/A/a3ff45c3cd0c4e189b52ac603787ea6b, hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/99318ad6c4e7b8782230d738424ff705/A/08312614169549859a5fb176611e45fe] into tmpdir=hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/99318ad6c4e7b8782230d738424ff705/.tmp, totalSize=39.3 K 2024-12-16T17:56:50,667 DEBUG [RS:0;3609ad07831c:39733-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 37823 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-16T17:56:50,667 DEBUG [RS:0;3609ad07831c:39733-longCompactions-0 {}] regionserver.HStore(1540): 99318ad6c4e7b8782230d738424ff705/B is initiating minor compaction (all files) 2024-12-16T17:56:50,667 INFO [RS:0;3609ad07831c:39733-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 99318ad6c4e7b8782230d738424ff705/B in TestAcidGuarantees,,1734371794518.99318ad6c4e7b8782230d738424ff705. 
2024-12-16T17:56:50,667 INFO [RS:0;3609ad07831c:39733-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/99318ad6c4e7b8782230d738424ff705/B/a4be1aa592074b17ae8f88edcde284ed, hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/99318ad6c4e7b8782230d738424ff705/B/3dcbace6c66f4bcd8e6088f429ee302b, hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/99318ad6c4e7b8782230d738424ff705/B/bc9b525da1334cd08017fb4b69e7c90b] into tmpdir=hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/99318ad6c4e7b8782230d738424ff705/.tmp, totalSize=36.9 K 2024-12-16T17:56:50,667 DEBUG [RS:0;3609ad07831c:39733-shortCompactions-0 {}] compactions.Compactor(224): Compacting 8d17e87f224e4a4092f9ae0e22bc5d28, keycount=150, bloomtype=ROW, size=12.9 K, encoding=NONE, compression=NONE, seqNum=386, earliestPutTs=1734371807979 2024-12-16T17:56:50,668 DEBUG [RS:0;3609ad07831c:39733-shortCompactions-0 {}] compactions.Compactor(224): Compacting a3ff45c3cd0c4e189b52ac603787ea6b, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=411, earliestPutTs=1734371808011 2024-12-16T17:56:50,668 DEBUG [RS:0;3609ad07831c:39733-longCompactions-0 {}] compactions.Compactor(224): Compacting a4be1aa592074b17ae8f88edcde284ed, keycount=150, bloomtype=ROW, size=12.9 K, encoding=NONE, compression=NONE, seqNum=386, earliestPutTs=1734371807979 2024-12-16T17:56:50,668 DEBUG [RS:0;3609ad07831c:39733-shortCompactions-0 {}] compactions.Compactor(224): Compacting 08312614169549859a5fb176611e45fe, keycount=200, bloomtype=ROW, size=14.4 K, encoding=NONE, compression=NONE, seqNum=428, earliestPutTs=1734371809142 2024-12-16T17:56:50,668 DEBUG [RS:0;3609ad07831c:39733-longCompactions-0 {}] compactions.Compactor(224): Compacting 3dcbace6c66f4bcd8e6088f429ee302b, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=411, earliestPutTs=1734371808011 2024-12-16T17:56:50,669 DEBUG [RS:0;3609ad07831c:39733-longCompactions-0 {}] compactions.Compactor(224): Compacting bc9b525da1334cd08017fb4b69e7c90b, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=428, earliestPutTs=1734371809142 2024-12-16T17:56:50,679 INFO [RS:0;3609ad07831c:39733-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 99318ad6c4e7b8782230d738424ff705#B#compaction#90 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-12-16T17:56:50,679 DEBUG [RS:0;3609ad07831c:39733-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/99318ad6c4e7b8782230d738424ff705/.tmp/B/184ce2bb40ec45c9af9249134b720dbd is 50, key is test_row_0/B:col10/1734371809762/Put/seqid=0 2024-12-16T17:56:50,697 INFO [RS:0;3609ad07831c:39733-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 99318ad6c4e7b8782230d738424ff705#A#compaction#91 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-12-16T17:56:50,698 DEBUG [RS:0;3609ad07831c:39733-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/99318ad6c4e7b8782230d738424ff705/.tmp/A/c44339bde36e4283bf4fe8011da61080 is 50, key is test_row_0/A:col10/1734371809762/Put/seqid=0 2024-12-16T17:56:50,718 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41817 is added to blk_1073741929_1105 (size=13323) 2024-12-16T17:56:50,725 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38367 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=22 2024-12-16T17:56:50,726 DEBUG [RS:0;3609ad07831c:39733-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/99318ad6c4e7b8782230d738424ff705/.tmp/B/184ce2bb40ec45c9af9249134b720dbd as hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/99318ad6c4e7b8782230d738424ff705/B/184ce2bb40ec45c9af9249134b720dbd 2024-12-16T17:56:50,729 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 3609ad07831c,39733,1734371789085 2024-12-16T17:56:50,730 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=39733 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=23 2024-12-16T17:56:50,730 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-1 {event_type=RS_FLUSH_REGIONS, pid=23}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1734371794518.99318ad6c4e7b8782230d738424ff705. 
2024-12-16T17:56:50,730 INFO [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-1 {event_type=RS_FLUSH_REGIONS, pid=23}] regionserver.HRegion(2837): Flushing 99318ad6c4e7b8782230d738424ff705 3/3 column families, dataSize=127.47 KB heapSize=334.73 KB 2024-12-16T17:56:50,730 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-1 {event_type=RS_FLUSH_REGIONS, pid=23}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 99318ad6c4e7b8782230d738424ff705, store=A 2024-12-16T17:56:50,730 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-1 {event_type=RS_FLUSH_REGIONS, pid=23}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-16T17:56:50,731 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-1 {event_type=RS_FLUSH_REGIONS, pid=23}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 99318ad6c4e7b8782230d738424ff705, store=B 2024-12-16T17:56:50,731 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-1 {event_type=RS_FLUSH_REGIONS, pid=23}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-16T17:56:50,731 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-1 {event_type=RS_FLUSH_REGIONS, pid=23}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 99318ad6c4e7b8782230d738424ff705, store=C 2024-12-16T17:56:50,731 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-1 {event_type=RS_FLUSH_REGIONS, pid=23}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-16T17:56:50,733 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41817 is added to blk_1073741930_1106 (size=13323) 2024-12-16T17:56:50,737 INFO [RS:0;3609ad07831c:39733-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 99318ad6c4e7b8782230d738424ff705/B of 99318ad6c4e7b8782230d738424ff705 into 184ce2bb40ec45c9af9249134b720dbd(size=13.0 K), total size for store is 13.0 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-12-16T17:56:50,737 DEBUG [RS:0;3609ad07831c:39733-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 99318ad6c4e7b8782230d738424ff705: 2024-12-16T17:56:50,737 INFO [RS:0;3609ad07831c:39733-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1734371794518.99318ad6c4e7b8782230d738424ff705., storeName=99318ad6c4e7b8782230d738424ff705/B, priority=13, startTime=1734371810665; duration=0sec 2024-12-16T17:56:50,738 DEBUG [RS:0;3609ad07831c:39733-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-16T17:56:50,738 DEBUG [RS:0;3609ad07831c:39733-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 99318ad6c4e7b8782230d738424ff705:B 2024-12-16T17:56:50,738 DEBUG [RS:0;3609ad07831c:39733-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-16T17:56:50,739 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-1 {event_type=RS_FLUSH_REGIONS, pid=23}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/99318ad6c4e7b8782230d738424ff705/.tmp/A/55b0ce89a0234c42b11cf0d804195104 is 50, key is test_row_0/A:col10/1734371809781/Put/seqid=0 2024-12-16T17:56:50,742 DEBUG [RS:0;3609ad07831c:39733-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 37823 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-16T17:56:50,742 DEBUG [RS:0;3609ad07831c:39733-longCompactions-0 {}] regionserver.HStore(1540): 99318ad6c4e7b8782230d738424ff705/C is initiating minor compaction (all files) 2024-12-16T17:56:50,742 INFO [RS:0;3609ad07831c:39733-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 99318ad6c4e7b8782230d738424ff705/C in TestAcidGuarantees,,1734371794518.99318ad6c4e7b8782230d738424ff705. 
2024-12-16T17:56:50,742 INFO [RS:0;3609ad07831c:39733-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/99318ad6c4e7b8782230d738424ff705/C/02de04e6205c4b1f9b4a2f967bda7692, hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/99318ad6c4e7b8782230d738424ff705/C/00f360fab86549949af0bdd2e12ad8ee, hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/99318ad6c4e7b8782230d738424ff705/C/9ee9df5543da4795923e238b8e4424a0] into tmpdir=hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/99318ad6c4e7b8782230d738424ff705/.tmp, totalSize=36.9 K 2024-12-16T17:56:50,743 DEBUG [RS:0;3609ad07831c:39733-longCompactions-0 {}] compactions.Compactor(224): Compacting 02de04e6205c4b1f9b4a2f967bda7692, keycount=150, bloomtype=ROW, size=12.9 K, encoding=NONE, compression=NONE, seqNum=386, earliestPutTs=1734371807979 2024-12-16T17:56:50,744 DEBUG [RS:0;3609ad07831c:39733-longCompactions-0 {}] compactions.Compactor(224): Compacting 00f360fab86549949af0bdd2e12ad8ee, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=411, earliestPutTs=1734371808011 2024-12-16T17:56:50,744 DEBUG [RS:0;3609ad07831c:39733-longCompactions-0 {}] compactions.Compactor(224): Compacting 9ee9df5543da4795923e238b8e4424a0, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=428, earliestPutTs=1734371809142 2024-12-16T17:56:50,759 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41817 is added to blk_1073741931_1107 (size=12301) 2024-12-16T17:56:50,762 INFO [RS:0;3609ad07831c:39733-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 99318ad6c4e7b8782230d738424ff705#C#compaction#93 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-12-16T17:56:50,762 DEBUG [RS:0;3609ad07831c:39733-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/99318ad6c4e7b8782230d738424ff705/.tmp/C/3feeaffbf50243b39e1ca1814725da0a is 50, key is test_row_0/C:col10/1734371809762/Put/seqid=0 2024-12-16T17:56:50,771 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41817 is added to blk_1073741932_1108 (size=13323) 2024-12-16T17:56:50,779 DEBUG [RS:0;3609ad07831c:39733-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/99318ad6c4e7b8782230d738424ff705/.tmp/C/3feeaffbf50243b39e1ca1814725da0a as hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/99318ad6c4e7b8782230d738424ff705/C/3feeaffbf50243b39e1ca1814725da0a 2024-12-16T17:56:50,788 INFO [RS:0;3609ad07831c:39733-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 99318ad6c4e7b8782230d738424ff705/C of 99318ad6c4e7b8782230d738424ff705 into 3feeaffbf50243b39e1ca1814725da0a(size=13.0 K), total size for store is 13.0 K. 
This selection was in queue for 0sec, and took 0sec to execute. 2024-12-16T17:56:50,788 DEBUG [RS:0;3609ad07831c:39733-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 99318ad6c4e7b8782230d738424ff705: 2024-12-16T17:56:50,788 INFO [RS:0;3609ad07831c:39733-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1734371794518.99318ad6c4e7b8782230d738424ff705., storeName=99318ad6c4e7b8782230d738424ff705/C, priority=13, startTime=1734371810665; duration=0sec 2024-12-16T17:56:50,788 DEBUG [RS:0;3609ad07831c:39733-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-16T17:56:50,789 DEBUG [RS:0;3609ad07831c:39733-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 99318ad6c4e7b8782230d738424ff705:C 2024-12-16T17:56:50,924 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39733 {}] regionserver.HRegion(8581): Flush requested on 99318ad6c4e7b8782230d738424ff705 2024-12-16T17:56:50,924 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1734371794518.99318ad6c4e7b8782230d738424ff705. as already flushing 2024-12-16T17:56:50,965 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39733 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=99318ad6c4e7b8782230d738424ff705, server=3609ad07831c,39733,1734371789085 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-16T17:56:50,966 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39733 {}] ipc.CallRunner(138): callId: 196 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40284 deadline: 1734371870931, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=99318ad6c4e7b8782230d738424ff705, server=3609ad07831c,39733,1734371789085 2024-12-16T17:56:50,970 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39733 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=99318ad6c4e7b8782230d738424ff705, server=3609ad07831c,39733,1734371789085 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-16T17:56:50,970 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39733 {}] ipc.CallRunner(138): callId: 194 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40296 deadline: 1734371870966, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=99318ad6c4e7b8782230d738424ff705, server=3609ad07831c,39733,1734371789085 2024-12-16T17:56:50,971 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39733 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=99318ad6c4e7b8782230d738424ff705, server=3609ad07831c,39733,1734371789085 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-16T17:56:50,971 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39733 {}] ipc.CallRunner(138): callId: 203 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40298 deadline: 1734371870966, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=99318ad6c4e7b8782230d738424ff705, server=3609ad07831c,39733,1734371789085 2024-12-16T17:56:50,971 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39733 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=99318ad6c4e7b8782230d738424ff705, server=3609ad07831c,39733,1734371789085 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-16T17:56:50,971 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39733 {}] ipc.CallRunner(138): callId: 193 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40322 deadline: 1734371870968, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=99318ad6c4e7b8782230d738424ff705, server=3609ad07831c,39733,1734371789085 2024-12-16T17:56:51,025 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38367 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=22 2024-12-16T17:56:51,067 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39733 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=99318ad6c4e7b8782230d738424ff705, server=3609ad07831c,39733,1734371789085 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-16T17:56:51,068 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39733 {}] ipc.CallRunner(138): callId: 198 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40284 deadline: 1734371871067, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=99318ad6c4e7b8782230d738424ff705, server=3609ad07831c,39733,1734371789085 2024-12-16T17:56:51,072 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39733 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=99318ad6c4e7b8782230d738424ff705, server=3609ad07831c,39733,1734371789085 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-16T17:56:51,073 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39733 {}] ipc.CallRunner(138): callId: 196 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40296 deadline: 1734371871071, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=99318ad6c4e7b8782230d738424ff705, server=3609ad07831c,39733,1734371789085 2024-12-16T17:56:51,073 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39733 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=99318ad6c4e7b8782230d738424ff705, server=3609ad07831c,39733,1734371789085 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-16T17:56:51,073 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39733 {}] ipc.CallRunner(138): callId: 205 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40298 deadline: 1734371871072, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=99318ad6c4e7b8782230d738424ff705, server=3609ad07831c,39733,1734371789085 2024-12-16T17:56:51,073 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39733 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=99318ad6c4e7b8782230d738424ff705, server=3609ad07831c,39733,1734371789085 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-16T17:56:51,073 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39733 {}] ipc.CallRunner(138): callId: 195 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40322 deadline: 1734371871072, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=99318ad6c4e7b8782230d738424ff705, server=3609ad07831c,39733,1734371789085 2024-12-16T17:56:51,138 DEBUG [RS:0;3609ad07831c:39733-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/99318ad6c4e7b8782230d738424ff705/.tmp/A/c44339bde36e4283bf4fe8011da61080 as hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/99318ad6c4e7b8782230d738424ff705/A/c44339bde36e4283bf4fe8011da61080 2024-12-16T17:56:51,146 INFO [RS:0;3609ad07831c:39733-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 99318ad6c4e7b8782230d738424ff705/A of 99318ad6c4e7b8782230d738424ff705 into c44339bde36e4283bf4fe8011da61080(size=13.0 K), total size for store is 13.0 K. This selection was in queue for 0sec, and took 0sec to execute. 
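The repeated RegionTooBusyException warnings above come from HRegion.checkResources rejecting puts while the region's memstore is over its 512.0 K blocking limit and a flush is still draining it. Below is a minimal, illustrative Java sketch of how a caller could back off and retry on that exception; it is not the test's actual client code, the class name and backoff constants are hypothetical, and the stock HBase client already retries this exception internally, so the loop only makes the backoff explicit.

    // Illustrative only: back off when the server reports the region's memstore
    // is over its blocking limit, then retry the same Put.
    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.RegionTooBusyException;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;
    import org.apache.hadoop.hbase.client.Put;
    import org.apache.hadoop.hbase.client.Table;
    import org.apache.hadoop.hbase.util.Bytes;

    public class BusyRegionRetryExample {
        public static void main(String[] args) throws Exception {
            Configuration conf = HBaseConfiguration.create();
            try (Connection conn = ConnectionFactory.createConnection(conf);
                 Table table = conn.getTable(TableName.valueOf("TestAcidGuarantees"))) {
                Put put = new Put(Bytes.toBytes("test_row_0"));
                put.addColumn(Bytes.toBytes("A"), Bytes.toBytes("col10"), Bytes.toBytes("value"));
                long backoffMs = 100;                       // hypothetical starting backoff
                for (int attempt = 0; attempt < 5; attempt++) {
                    try {
                        table.put(put);
                        break;                              // write accepted
                    } catch (RegionTooBusyException e) {
                        // Region is over its memstore blocking limit; wait for the
                        // in-flight flush to drain, then try again.
                        Thread.sleep(backoffMs);
                        backoffMs *= 2;
                    }
                }
            }
        }
    }
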
2024-12-16T17:56:51,146 DEBUG [RS:0;3609ad07831c:39733-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 99318ad6c4e7b8782230d738424ff705: 2024-12-16T17:56:51,146 INFO [RS:0;3609ad07831c:39733-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1734371794518.99318ad6c4e7b8782230d738424ff705., storeName=99318ad6c4e7b8782230d738424ff705/A, priority=13, startTime=1734371810664; duration=0sec 2024-12-16T17:56:51,146 DEBUG [RS:0;3609ad07831c:39733-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-16T17:56:51,146 DEBUG [RS:0;3609ad07831c:39733-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 99318ad6c4e7b8782230d738424ff705:A 2024-12-16T17:56:51,160 INFO [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-1 {event_type=RS_FLUSH_REGIONS, pid=23}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=42.49 KB at sequenceid=450 (bloomFilter=true), to=hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/99318ad6c4e7b8782230d738424ff705/.tmp/A/55b0ce89a0234c42b11cf0d804195104 2024-12-16T17:56:51,171 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-1 {event_type=RS_FLUSH_REGIONS, pid=23}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/99318ad6c4e7b8782230d738424ff705/.tmp/B/9ce6a910262e40e89f4a6a64220bfaf4 is 50, key is test_row_0/B:col10/1734371809781/Put/seqid=0 2024-12-16T17:56:51,176 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41817 is added to blk_1073741933_1109 (size=12301) 2024-12-16T17:56:51,177 INFO [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-1 {event_type=RS_FLUSH_REGIONS, pid=23}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=42.49 KB at sequenceid=450 (bloomFilter=true), to=hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/99318ad6c4e7b8782230d738424ff705/.tmp/B/9ce6a910262e40e89f4a6a64220bfaf4 2024-12-16T17:56:51,188 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-1 {event_type=RS_FLUSH_REGIONS, pid=23}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/99318ad6c4e7b8782230d738424ff705/.tmp/C/bd400da5893b4fed88ba2f0904f722a4 is 50, key is test_row_0/C:col10/1734371809781/Put/seqid=0 2024-12-16T17:56:51,192 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41817 is added to blk_1073741934_1110 (size=12301) 2024-12-16T17:56:51,193 INFO [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-1 {event_type=RS_FLUSH_REGIONS, pid=23}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=42.49 KB at sequenceid=450 (bloomFilter=true), to=hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/99318ad6c4e7b8782230d738424ff705/.tmp/C/bd400da5893b4fed88ba2f0904f722a4 2024-12-16T17:56:51,198 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-1 {event_type=RS_FLUSH_REGIONS, pid=23}] regionserver.HRegionFileSystem(442): Committing 
hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/99318ad6c4e7b8782230d738424ff705/.tmp/A/55b0ce89a0234c42b11cf0d804195104 as hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/99318ad6c4e7b8782230d738424ff705/A/55b0ce89a0234c42b11cf0d804195104 2024-12-16T17:56:51,202 INFO [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-1 {event_type=RS_FLUSH_REGIONS, pid=23}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/99318ad6c4e7b8782230d738424ff705/A/55b0ce89a0234c42b11cf0d804195104, entries=150, sequenceid=450, filesize=12.0 K 2024-12-16T17:56:51,203 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-1 {event_type=RS_FLUSH_REGIONS, pid=23}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/99318ad6c4e7b8782230d738424ff705/.tmp/B/9ce6a910262e40e89f4a6a64220bfaf4 as hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/99318ad6c4e7b8782230d738424ff705/B/9ce6a910262e40e89f4a6a64220bfaf4 2024-12-16T17:56:51,208 INFO [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-1 {event_type=RS_FLUSH_REGIONS, pid=23}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/99318ad6c4e7b8782230d738424ff705/B/9ce6a910262e40e89f4a6a64220bfaf4, entries=150, sequenceid=450, filesize=12.0 K 2024-12-16T17:56:51,209 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-1 {event_type=RS_FLUSH_REGIONS, pid=23}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/99318ad6c4e7b8782230d738424ff705/.tmp/C/bd400da5893b4fed88ba2f0904f722a4 as hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/99318ad6c4e7b8782230d738424ff705/C/bd400da5893b4fed88ba2f0904f722a4 2024-12-16T17:56:51,215 INFO [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-1 {event_type=RS_FLUSH_REGIONS, pid=23}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/99318ad6c4e7b8782230d738424ff705/C/bd400da5893b4fed88ba2f0904f722a4, entries=150, sequenceid=450, filesize=12.0 K 2024-12-16T17:56:51,216 INFO [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-1 {event_type=RS_FLUSH_REGIONS, pid=23}] regionserver.HRegion(3040): Finished flush of dataSize ~127.47 KB/130530, heapSize ~334.69 KB/342720, currentSize=80.51 KB/82440 for 99318ad6c4e7b8782230d738424ff705 in 486ms, sequenceid=450, compaction requested=false 2024-12-16T17:56:51,216 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-1 {event_type=RS_FLUSH_REGIONS, pid=23}] regionserver.HRegion(2538): Flush status journal for 99318ad6c4e7b8782230d738424ff705: 2024-12-16T17:56:51,216 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-1 {event_type=RS_FLUSH_REGIONS, pid=23}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1734371794518.99318ad6c4e7b8782230d738424ff705. 
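The warnings keep quoting the same figure, "Over memstore limit=512.0 K". In HBase that per-region blocking threshold is the product of hbase.hregion.memstore.flush.size and hbase.hregion.memstore.block.multiplier; writes are rejected with RegionTooBusyException while the memstore sits above that product. The sketch below only illustrates the arithmetic, and the 128 KB x 4 combination is an assumption about this test's configuration, not something the log states.

    // Illustrative arithmetic for the blocking limit seen in the log.
    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;

    public class MemstoreBlockingLimit {
        public static void main(String[] args) {
            Configuration conf = HBaseConfiguration.create();
            conf.setLong("hbase.hregion.memstore.flush.size", 128 * 1024); // assumed test value
            conf.setInt("hbase.hregion.memstore.block.multiplier", 4);     // HBase default
            long blockingLimit = conf.getLong("hbase.hregion.memstore.flush.size", 0)
                * conf.getInt("hbase.hregion.memstore.block.multiplier", 4);
            // Prints "blocking limit = 512.0 K", matching the RegionTooBusyException message.
            System.out.println("blocking limit = " + (blockingLimit / 1024.0) + " K");
        }
    }
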
2024-12-16T17:56:51,217 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-1 {event_type=RS_FLUSH_REGIONS, pid=23}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=23 2024-12-16T17:56:51,217 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38367 {}] master.HMaster(4106): Remote procedure done, pid=23 2024-12-16T17:56:51,220 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=23, resume processing ppid=22 2024-12-16T17:56:51,220 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=23, ppid=22, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 794 msec 2024-12-16T17:56:51,222 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=22, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=22, table=TestAcidGuarantees in 799 msec 2024-12-16T17:56:51,271 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39733 {}] regionserver.HRegion(8581): Flush requested on 99318ad6c4e7b8782230d738424ff705 2024-12-16T17:56:51,271 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 99318ad6c4e7b8782230d738424ff705 3/3 column families, dataSize=87.22 KB heapSize=229.27 KB 2024-12-16T17:56:51,271 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 99318ad6c4e7b8782230d738424ff705, store=A 2024-12-16T17:56:51,271 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-16T17:56:51,271 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 99318ad6c4e7b8782230d738424ff705, store=B 2024-12-16T17:56:51,271 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-16T17:56:51,271 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 99318ad6c4e7b8782230d738424ff705, store=C 2024-12-16T17:56:51,271 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-16T17:56:51,276 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/99318ad6c4e7b8782230d738424ff705/.tmp/A/e776fdca60a642bd9131d15472b7b62b is 50, key is test_row_0/A:col10/1734371810931/Put/seqid=0 2024-12-16T17:56:51,281 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41817 is added to blk_1073741935_1111 (size=14741) 2024-12-16T17:56:51,293 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39733 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=99318ad6c4e7b8782230d738424ff705, server=3609ad07831c,39733,1734371789085 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-16T17:56:51,293 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39733 {}] ipc.CallRunner(138): callId: 211 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40298 deadline: 1734371871287, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=99318ad6c4e7b8782230d738424ff705, server=3609ad07831c,39733,1734371789085 2024-12-16T17:56:51,295 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39733 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=99318ad6c4e7b8782230d738424ff705, server=3609ad07831c,39733,1734371789085 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-16T17:56:51,295 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39733 {}] ipc.CallRunner(138): callId: 206 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40284 deadline: 1734371871293, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=99318ad6c4e7b8782230d738424ff705, server=3609ad07831c,39733,1734371789085 2024-12-16T17:56:51,295 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39733 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=99318ad6c4e7b8782230d738424ff705, server=3609ad07831c,39733,1734371789085 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-16T17:56:51,295 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39733 {}] ipc.CallRunner(138): callId: 201 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40322 deadline: 1734371871293, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=99318ad6c4e7b8782230d738424ff705, server=3609ad07831c,39733,1734371789085 2024-12-16T17:56:51,296 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39733 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=99318ad6c4e7b8782230d738424ff705, server=3609ad07831c,39733,1734371789085 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-16T17:56:51,296 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39733 {}] ipc.CallRunner(138): callId: 202 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40296 deadline: 1734371871293, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=99318ad6c4e7b8782230d738424ff705, server=3609ad07831c,39733,1734371789085 2024-12-16T17:56:51,394 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39733 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=99318ad6c4e7b8782230d738424ff705, server=3609ad07831c,39733,1734371789085 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-16T17:56:51,394 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39733 {}] ipc.CallRunner(138): callId: 213 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40298 deadline: 1734371871394, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=99318ad6c4e7b8782230d738424ff705, server=3609ad07831c,39733,1734371789085 2024-12-16T17:56:51,397 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39733 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=99318ad6c4e7b8782230d738424ff705, server=3609ad07831c,39733,1734371789085 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-16T17:56:51,397 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39733 {}] ipc.CallRunner(138): callId: 208 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40284 deadline: 1734371871396, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=99318ad6c4e7b8782230d738424ff705, server=3609ad07831c,39733,1734371789085 2024-12-16T17:56:51,398 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39733 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=99318ad6c4e7b8782230d738424ff705, server=3609ad07831c,39733,1734371789085 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-16T17:56:51,398 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39733 {}] ipc.CallRunner(138): callId: 203 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40322 deadline: 1734371871396, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=99318ad6c4e7b8782230d738424ff705, server=3609ad07831c,39733,1734371789085 2024-12-16T17:56:51,398 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39733 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=99318ad6c4e7b8782230d738424ff705, server=3609ad07831c,39733,1734371789085 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-16T17:56:51,398 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39733 {}] ipc.CallRunner(138): callId: 204 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40296 deadline: 1734371871396, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=99318ad6c4e7b8782230d738424ff705, server=3609ad07831c,39733,1734371789085 2024-12-16T17:56:51,526 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38367 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=22 2024-12-16T17:56:51,526 INFO [Thread-159 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 22 completed 2024-12-16T17:56:51,527 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38367 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-12-16T17:56:51,528 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38367 {}] procedure2.ProcedureExecutor(1098): Stored pid=24, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=24, table=TestAcidGuarantees 2024-12-16T17:56:51,529 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38367 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=24 2024-12-16T17:56:51,529 INFO [PEWorker-3 {}] procedure.FlushTableProcedure(91): pid=24, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=24, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-12-16T17:56:51,529 INFO [PEWorker-3 {}] procedure.FlushTableProcedure(91): pid=24, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=24, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-12-16T17:56:51,530 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=25, ppid=24, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-12-16T17:56:51,598 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39733 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=99318ad6c4e7b8782230d738424ff705, server=3609ad07831c,39733,1734371789085 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-16T17:56:51,598 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39733 {}] ipc.CallRunner(138): callId: 215 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40298 deadline: 1734371871596, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=99318ad6c4e7b8782230d738424ff705, server=3609ad07831c,39733,1734371789085 2024-12-16T17:56:51,599 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39733 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=99318ad6c4e7b8782230d738424ff705, server=3609ad07831c,39733,1734371789085 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-16T17:56:51,600 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39733 {}] ipc.CallRunner(138): callId: 210 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40284 deadline: 1734371871598, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=99318ad6c4e7b8782230d738424ff705, server=3609ad07831c,39733,1734371789085 2024-12-16T17:56:51,601 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39733 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=99318ad6c4e7b8782230d738424ff705, server=3609ad07831c,39733,1734371789085 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-16T17:56:51,601 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39733 {}] ipc.CallRunner(138): callId: 205 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40322 deadline: 1734371871600, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=99318ad6c4e7b8782230d738424ff705, server=3609ad07831c,39733,1734371789085 2024-12-16T17:56:51,602 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39733 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=99318ad6c4e7b8782230d738424ff705, server=3609ad07831c,39733,1734371789085 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-16T17:56:51,602 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39733 {}] ipc.CallRunner(138): callId: 206 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40296 deadline: 1734371871600, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=99318ad6c4e7b8782230d738424ff705, server=3609ad07831c,39733,1734371789085 2024-12-16T17:56:51,629 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38367 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=24 2024-12-16T17:56:51,680 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 3609ad07831c,39733,1734371789085 2024-12-16T17:56:51,680 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=39733 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=25 2024-12-16T17:56:51,680 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-2 {event_type=RS_FLUSH_REGIONS, pid=25}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1734371794518.99318ad6c4e7b8782230d738424ff705. 2024-12-16T17:56:51,681 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-2 {event_type=RS_FLUSH_REGIONS, pid=25}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1734371794518.99318ad6c4e7b8782230d738424ff705. as already flushing 2024-12-16T17:56:51,681 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-2 {event_type=RS_FLUSH_REGIONS, pid=25}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1734371794518.99318ad6c4e7b8782230d738424ff705. 2024-12-16T17:56:51,681 ERROR [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-2 {event_type=RS_FLUSH_REGIONS, pid=25}] handler.RSProcedureHandler(58): pid=25 java.io.IOException: Unable to complete flush {ENCODED => 99318ad6c4e7b8782230d738424ff705, NAME => 'TestAcidGuarantees,,1734371794518.99318ad6c4e7b8782230d738424ff705.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
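The "Client=jenkins//172.17.0.2 flush TestAcidGuarantees" record above shows an admin-initiated flush: the master stores a FlushTableProcedure (pid=24), spawns a FlushRegionProcedure (pid=25), and dispatches it to the regionserver, where it fails with "Unable to complete flush ... as already flushing" because a flush is still in progress. A hedged sketch of the client side follows; whether the test harness issues exactly this plain Admin.flush call is an assumption, and the class name is hypothetical.

    // Illustrative only: ask the master to flush every region of the table and
    // block until the flush procedure reports completion.
    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;

    public class FlushTableExample {
        public static void main(String[] args) throws Exception {
            Configuration conf = HBaseConfiguration.create();
            try (Connection conn = ConnectionFactory.createConnection(conf);
                 Admin admin = conn.getAdmin()) {
                admin.flush(TableName.valueOf("TestAcidGuarantees"));
            }
        }
    }

On the server side the failed subprocedure is simply reported back to the master and re-dispatched, which is why the same pid=25 callable appears again further down once the in-flight flush has finished.
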
2024-12-16T17:56:51,681 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-2 {event_type=RS_FLUSH_REGIONS, pid=25}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=25 java.io.IOException: Unable to complete flush {ENCODED => 99318ad6c4e7b8782230d738424ff705, NAME => 'TestAcidGuarantees,,1734371794518.99318ad6c4e7b8782230d738424ff705.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-16T17:56:51,682 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38367 {}] master.HMaster(4114): Remote procedure failed, pid=25 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 99318ad6c4e7b8782230d738424ff705, NAME => 'TestAcidGuarantees,,1734371794518.99318ad6c4e7b8782230d738424ff705.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 99318ad6c4e7b8782230d738424ff705, NAME => 'TestAcidGuarantees,,1734371794518.99318ad6c4e7b8782230d738424ff705.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-16T17:56:51,683 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=29.07 KB at sequenceid=469 (bloomFilter=true), to=hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/99318ad6c4e7b8782230d738424ff705/.tmp/A/e776fdca60a642bd9131d15472b7b62b 2024-12-16T17:56:51,694 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/99318ad6c4e7b8782230d738424ff705/.tmp/B/cc17a241680b4c68a828deac73e09660 is 50, key is test_row_0/B:col10/1734371810931/Put/seqid=0 2024-12-16T17:56:51,702 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41817 is added to blk_1073741936_1112 (size=12301) 2024-12-16T17:56:51,830 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38367 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=24 2024-12-16T17:56:51,834 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 3609ad07831c,39733,1734371789085 2024-12-16T17:56:51,835 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=39733 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=25 2024-12-16T17:56:51,835 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-0 {event_type=RS_FLUSH_REGIONS, pid=25}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1734371794518.99318ad6c4e7b8782230d738424ff705. 2024-12-16T17:56:51,835 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-0 {event_type=RS_FLUSH_REGIONS, pid=25}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1734371794518.99318ad6c4e7b8782230d738424ff705. as already flushing 2024-12-16T17:56:51,835 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-0 {event_type=RS_FLUSH_REGIONS, pid=25}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1734371794518.99318ad6c4e7b8782230d738424ff705. 2024-12-16T17:56:51,835 ERROR [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-0 {event_type=RS_FLUSH_REGIONS, pid=25}] handler.RSProcedureHandler(58): pid=25 java.io.IOException: Unable to complete flush {ENCODED => 99318ad6c4e7b8782230d738424ff705, NAME => 'TestAcidGuarantees,,1734371794518.99318ad6c4e7b8782230d738424ff705.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] 
at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-16T17:56:51,835 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-0 {event_type=RS_FLUSH_REGIONS, pid=25}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=25 java.io.IOException: Unable to complete flush {ENCODED => 99318ad6c4e7b8782230d738424ff705, NAME => 'TestAcidGuarantees,,1734371794518.99318ad6c4e7b8782230d738424ff705.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-16T17:56:51,836 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38367 {}] master.HMaster(4114): Remote procedure failed, pid=25 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 99318ad6c4e7b8782230d738424ff705, NAME => 'TestAcidGuarantees,,1734371794518.99318ad6c4e7b8782230d738424ff705.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 99318ad6c4e7b8782230d738424ff705, NAME => 'TestAcidGuarantees,,1734371794518.99318ad6c4e7b8782230d738424ff705.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] 
at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-16T17:56:51,899 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39733 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=99318ad6c4e7b8782230d738424ff705, server=3609ad07831c,39733,1734371789085 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-16T17:56:51,899 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39733 {}] ipc.CallRunner(138): callId: 217 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40298 deadline: 1734371871899, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=99318ad6c4e7b8782230d738424ff705, server=3609ad07831c,39733,1734371789085 2024-12-16T17:56:51,903 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39733 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=99318ad6c4e7b8782230d738424ff705, server=3609ad07831c,39733,1734371789085 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-16T17:56:51,904 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39733 {}] ipc.CallRunner(138): callId: 212 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40284 deadline: 1734371871903, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=99318ad6c4e7b8782230d738424ff705, server=3609ad07831c,39733,1734371789085 2024-12-16T17:56:51,905 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39733 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=99318ad6c4e7b8782230d738424ff705, server=3609ad07831c,39733,1734371789085 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-16T17:56:51,905 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39733 {}] ipc.CallRunner(138): callId: 207 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40322 deadline: 1734371871904, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=99318ad6c4e7b8782230d738424ff705, server=3609ad07831c,39733,1734371789085 2024-12-16T17:56:51,905 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39733 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=99318ad6c4e7b8782230d738424ff705, server=3609ad07831c,39733,1734371789085 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-16T17:56:51,905 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39733 {}] ipc.CallRunner(138): callId: 208 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40296 deadline: 1734371871905, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=99318ad6c4e7b8782230d738424ff705, server=3609ad07831c,39733,1734371789085 2024-12-16T17:56:51,987 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 3609ad07831c,39733,1734371789085 2024-12-16T17:56:51,987 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=39733 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=25 2024-12-16T17:56:51,987 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-1 {event_type=RS_FLUSH_REGIONS, pid=25}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1734371794518.99318ad6c4e7b8782230d738424ff705. 2024-12-16T17:56:51,987 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-1 {event_type=RS_FLUSH_REGIONS, pid=25}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1734371794518.99318ad6c4e7b8782230d738424ff705. as already flushing 2024-12-16T17:56:51,987 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-1 {event_type=RS_FLUSH_REGIONS, pid=25}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1734371794518.99318ad6c4e7b8782230d738424ff705. 2024-12-16T17:56:51,988 ERROR [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-1 {event_type=RS_FLUSH_REGIONS, pid=25}] handler.RSProcedureHandler(58): pid=25 java.io.IOException: Unable to complete flush {ENCODED => 99318ad6c4e7b8782230d738424ff705, NAME => 'TestAcidGuarantees,,1734371794518.99318ad6c4e7b8782230d738424ff705.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] 
at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-16T17:56:51,988 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-1 {event_type=RS_FLUSH_REGIONS, pid=25}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=25 java.io.IOException: Unable to complete flush {ENCODED => 99318ad6c4e7b8782230d738424ff705, NAME => 'TestAcidGuarantees,,1734371794518.99318ad6c4e7b8782230d738424ff705.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-16T17:56:51,988 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38367 {}] master.HMaster(4114): Remote procedure failed, pid=25 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 99318ad6c4e7b8782230d738424ff705, NAME => 'TestAcidGuarantees,,1734371794518.99318ad6c4e7b8782230d738424ff705.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 99318ad6c4e7b8782230d738424ff705, NAME => 'TestAcidGuarantees,,1734371794518.99318ad6c4e7b8782230d738424ff705.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-16T17:56:52,103 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=29.07 KB at sequenceid=469 (bloomFilter=true), to=hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/99318ad6c4e7b8782230d738424ff705/.tmp/B/cc17a241680b4c68a828deac73e09660 2024-12-16T17:56:52,111 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/99318ad6c4e7b8782230d738424ff705/.tmp/C/0cc3cb5d88244d0bb30edb0f087f7c65 is 50, key is test_row_0/C:col10/1734371810931/Put/seqid=0 2024-12-16T17:56:52,114 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41817 is added to blk_1073741937_1113 (size=12301) 2024-12-16T17:56:52,131 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38367 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=24 2024-12-16T17:56:52,140 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 3609ad07831c,39733,1734371789085 2024-12-16T17:56:52,140 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=39733 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=25 2024-12-16T17:56:52,140 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-2 {event_type=RS_FLUSH_REGIONS, pid=25}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1734371794518.99318ad6c4e7b8782230d738424ff705. 2024-12-16T17:56:52,140 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-2 {event_type=RS_FLUSH_REGIONS, pid=25}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1734371794518.99318ad6c4e7b8782230d738424ff705. as already flushing 2024-12-16T17:56:52,140 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-2 {event_type=RS_FLUSH_REGIONS, pid=25}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1734371794518.99318ad6c4e7b8782230d738424ff705. 2024-12-16T17:56:52,141 ERROR [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-2 {event_type=RS_FLUSH_REGIONS, pid=25}] handler.RSProcedureHandler(58): pid=25 java.io.IOException: Unable to complete flush {ENCODED => 99318ad6c4e7b8782230d738424ff705, NAME => 'TestAcidGuarantees,,1734371794518.99318ad6c4e7b8782230d738424ff705.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-16T17:56:52,141 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-2 {event_type=RS_FLUSH_REGIONS, pid=25}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=25 java.io.IOException: Unable to complete flush {ENCODED => 99318ad6c4e7b8782230d738424ff705, NAME => 'TestAcidGuarantees,,1734371794518.99318ad6c4e7b8782230d738424ff705.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-16T17:56:52,141 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38367 {}] master.HMaster(4114): Remote procedure failed, pid=25 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 99318ad6c4e7b8782230d738424ff705, NAME => 'TestAcidGuarantees,,1734371794518.99318ad6c4e7b8782230d738424ff705.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 99318ad6c4e7b8782230d738424ff705, NAME => 'TestAcidGuarantees,,1734371794518.99318ad6c4e7b8782230d738424ff705.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-16T17:56:52,165 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39733 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=99318ad6c4e7b8782230d738424ff705, server=3609ad07831c,39733,1734371789085 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-16T17:56:52,165 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39733 {}] ipc.CallRunner(138): callId: 177 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40334 deadline: 1734371872164, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=99318ad6c4e7b8782230d738424ff705, server=3609ad07831c,39733,1734371789085 2024-12-16T17:56:52,166 DEBUG [Thread-155 {}] client.RpcRetryingCallerImpl(129): Call exception, tries=6, retries=16, started=4146 ms ago, cancelled=false, msg=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=99318ad6c4e7b8782230d738424ff705, server=3609ad07831c,39733,1734371789085 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) , details=row 'test_row_2' on table 'TestAcidGuarantees' at region=TestAcidGuarantees,,1734371794518.99318ad6c4e7b8782230d738424ff705., hostname=3609ad07831c,39733,1734371789085, seqNum=2, see https://s.apache.org/timeout, exception=org.apache.hadoop.hbase.RegionTooBusyException: org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=99318ad6c4e7b8782230d738424ff705, server=3609ad07831c,39733,1734371789085 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at jdk.internal.reflect.GeneratedConstructorAccessor40.newInstance(Unknown Source) at 
java.base/jdk.internal.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45) at java.base/java.lang.reflect.Constructor.newInstanceWithCaller(Constructor.java:499) at java.base/java.lang.reflect.Constructor.newInstance(Constructor.java:480) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.instantiateException(RemoteWithExtrasException.java:110) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.unwrapRemoteException(RemoteWithExtrasException.java:100) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.makeIOExceptionOfException(ProtobufUtil.java:280) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.handleRemoteException(ProtobufUtil.java:265) at org.apache.hadoop.hbase.client.RegionServerCallable.call(RegionServerCallable.java:133) at org.apache.hadoop.hbase.client.RpcRetryingCallerImpl.callWithRetries(RpcRetryingCallerImpl.java:104) at org.apache.hadoop.hbase.client.HTable.lambda$put$3(HTable.java:578) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.client.HTable.put(HTable.java:565) at org.apache.hadoop.hbase.AcidGuaranteesTestTool$AtomicityWriter.doAnAction(AcidGuaranteesTestTool.java:169) at org.apache.hadoop.hbase.MultithreadedTestUtil$RepeatingTestThread.doWork(MultithreadedTestUtil.java:149) at org.apache.hadoop.hbase.MultithreadedTestUtil$TestThread.run(MultithreadedTestUtil.java:123) Caused by: org.apache.hadoop.hbase.ipc.RemoteWithExtrasException(org.apache.hadoop.hbase.RegionTooBusyException): org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=99318ad6c4e7b8782230d738424ff705, server=3609ad07831c,39733,1734371789085 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.onCallFinished(AbstractRpcClient.java:392) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.access$100(AbstractRpcClient.java:94) at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:430) at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:425) at org.apache.hadoop.hbase.ipc.Call.callComplete(Call.java:116) at org.apache.hadoop.hbase.ipc.Call.setException(Call.java:131) at org.apache.hadoop.hbase.ipc.RpcConnection.readResponse(RpcConnection.java:457) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.readResponse(NettyRpcDuplexHandler.java:125) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.channelRead(NettyRpcDuplexHandler.java:140) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at 
org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.fireChannelRead(ByteToMessageDecoder.java:346) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.channelRead(ByteToMessageDecoder.java:318) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:444) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.timeout.IdleStateHandler.channelRead(IdleStateHandler.java:289) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline$HeadContext.channelRead(DefaultChannelPipeline.java:1357) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:440) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline.fireChannelRead(DefaultChannelPipeline.java:868) at org.apache.hbase.thirdparty.io.netty.channel.nio.AbstractNioByteChannel$NioByteUnsafe.read(AbstractNioByteChannel.java:166) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKey(NioEventLoop.java:788) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeysOptimized(NioEventLoop.java:724) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeys(NioEventLoop.java:650) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:562) at org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) at org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) at org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) at java.base/java.lang.Thread.run(Thread.java:840) 2024-12-16T17:56:52,293 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 3609ad07831c,39733,1734371789085 2024-12-16T17:56:52,293 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=39733 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=25 2024-12-16T17:56:52,293 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-0 {event_type=RS_FLUSH_REGIONS, pid=25}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1734371794518.99318ad6c4e7b8782230d738424ff705. 
2024-12-16T17:56:52,293 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-0 {event_type=RS_FLUSH_REGIONS, pid=25}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1734371794518.99318ad6c4e7b8782230d738424ff705. as already flushing 2024-12-16T17:56:52,294 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-0 {event_type=RS_FLUSH_REGIONS, pid=25}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1734371794518.99318ad6c4e7b8782230d738424ff705. 2024-12-16T17:56:52,294 ERROR [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-0 {event_type=RS_FLUSH_REGIONS, pid=25}] handler.RSProcedureHandler(58): pid=25 java.io.IOException: Unable to complete flush {ENCODED => 99318ad6c4e7b8782230d738424ff705, NAME => 'TestAcidGuarantees,,1734371794518.99318ad6c4e7b8782230d738424ff705.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-16T17:56:52,294 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-0 {event_type=RS_FLUSH_REGIONS, pid=25}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=25 java.io.IOException: Unable to complete flush {ENCODED => 99318ad6c4e7b8782230d738424ff705, NAME => 'TestAcidGuarantees,,1734371794518.99318ad6c4e7b8782230d738424ff705.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-16T17:56:52,294 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38367 {}] master.HMaster(4114): Remote procedure failed, pid=25 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 99318ad6c4e7b8782230d738424ff705, NAME => 'TestAcidGuarantees,,1734371794518.99318ad6c4e7b8782230d738424ff705.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 99318ad6c4e7b8782230d738424ff705, NAME => 'TestAcidGuarantees,,1734371794518.99318ad6c4e7b8782230d738424ff705.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-16T17:56:52,403 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39733 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=99318ad6c4e7b8782230d738424ff705, server=3609ad07831c,39733,1734371789085 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-16T17:56:52,404 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39733 {}] ipc.CallRunner(138): callId: 219 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40298 deadline: 1734371872402, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=99318ad6c4e7b8782230d738424ff705, server=3609ad07831c,39733,1734371789085 2024-12-16T17:56:52,405 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39733 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=99318ad6c4e7b8782230d738424ff705, server=3609ad07831c,39733,1734371789085 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-16T17:56:52,405 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39733 {}] ipc.CallRunner(138): callId: 214 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40284 deadline: 1734371872404, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=99318ad6c4e7b8782230d738424ff705, server=3609ad07831c,39733,1734371789085 2024-12-16T17:56:52,410 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39733 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=99318ad6c4e7b8782230d738424ff705, server=3609ad07831c,39733,1734371789085 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-16T17:56:52,410 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39733 {}] ipc.CallRunner(138): callId: 209 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40322 deadline: 1734371872409, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=99318ad6c4e7b8782230d738424ff705, server=3609ad07831c,39733,1734371789085 2024-12-16T17:56:52,410 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39733 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=99318ad6c4e7b8782230d738424ff705, server=3609ad07831c,39733,1734371789085 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-16T17:56:52,410 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39733 {}] ipc.CallRunner(138): callId: 210 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40296 deadline: 1734371872410, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=99318ad6c4e7b8782230d738424ff705, server=3609ad07831c,39733,1734371789085 2024-12-16T17:56:52,446 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 3609ad07831c,39733,1734371789085 2024-12-16T17:56:52,446 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=39733 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=25 2024-12-16T17:56:52,446 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-1 {event_type=RS_FLUSH_REGIONS, pid=25}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1734371794518.99318ad6c4e7b8782230d738424ff705. 2024-12-16T17:56:52,446 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-1 {event_type=RS_FLUSH_REGIONS, pid=25}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1734371794518.99318ad6c4e7b8782230d738424ff705. as already flushing 2024-12-16T17:56:52,446 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-1 {event_type=RS_FLUSH_REGIONS, pid=25}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1734371794518.99318ad6c4e7b8782230d738424ff705. 2024-12-16T17:56:52,446 ERROR [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-1 {event_type=RS_FLUSH_REGIONS, pid=25}] handler.RSProcedureHandler(58): pid=25 java.io.IOException: Unable to complete flush {ENCODED => 99318ad6c4e7b8782230d738424ff705, NAME => 'TestAcidGuarantees,,1734371794518.99318ad6c4e7b8782230d738424ff705.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-16T17:56:52,447 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-1 {event_type=RS_FLUSH_REGIONS, pid=25}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=25 java.io.IOException: Unable to complete flush {ENCODED => 99318ad6c4e7b8782230d738424ff705, NAME => 'TestAcidGuarantees,,1734371794518.99318ad6c4e7b8782230d738424ff705.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-16T17:56:52,447 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38367 {}] master.HMaster(4114): Remote procedure failed, pid=25 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 99318ad6c4e7b8782230d738424ff705, NAME => 'TestAcidGuarantees,,1734371794518.99318ad6c4e7b8782230d738424ff705.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 99318ad6c4e7b8782230d738424ff705, NAME => 'TestAcidGuarantees,,1734371794518.99318ad6c4e7b8782230d738424ff705.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-16T17:56:52,517 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=29.07 KB at sequenceid=469 (bloomFilter=true), to=hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/99318ad6c4e7b8782230d738424ff705/.tmp/C/0cc3cb5d88244d0bb30edb0f087f7c65 2024-12-16T17:56:52,523 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/99318ad6c4e7b8782230d738424ff705/.tmp/A/e776fdca60a642bd9131d15472b7b62b as hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/99318ad6c4e7b8782230d738424ff705/A/e776fdca60a642bd9131d15472b7b62b 2024-12-16T17:56:52,528 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/99318ad6c4e7b8782230d738424ff705/A/e776fdca60a642bd9131d15472b7b62b, entries=200, sequenceid=469, filesize=14.4 K 2024-12-16T17:56:52,530 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/99318ad6c4e7b8782230d738424ff705/.tmp/B/cc17a241680b4c68a828deac73e09660 as hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/99318ad6c4e7b8782230d738424ff705/B/cc17a241680b4c68a828deac73e09660 2024-12-16T17:56:52,535 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/99318ad6c4e7b8782230d738424ff705/B/cc17a241680b4c68a828deac73e09660, entries=150, sequenceid=469, filesize=12.0 K 2024-12-16T17:56:52,536 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/99318ad6c4e7b8782230d738424ff705/.tmp/C/0cc3cb5d88244d0bb30edb0f087f7c65 as hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/99318ad6c4e7b8782230d738424ff705/C/0cc3cb5d88244d0bb30edb0f087f7c65 2024-12-16T17:56:52,541 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/99318ad6c4e7b8782230d738424ff705/C/0cc3cb5d88244d0bb30edb0f087f7c65, entries=150, sequenceid=469, filesize=12.0 K 2024-12-16T17:56:52,542 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~87.22 KB/89310, heapSize ~229.22 KB/234720, currentSize=114.05 KB/116790 for 99318ad6c4e7b8782230d738424ff705 in 1271ms, sequenceid=469, compaction requested=true 2024-12-16T17:56:52,542 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 99318ad6c4e7b8782230d738424ff705: 2024-12-16T17:56:52,542 DEBUG [RS:0;3609ad07831c:39733-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction 
from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-16T17:56:52,542 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 99318ad6c4e7b8782230d738424ff705:A, priority=-2147483648, current under compaction store size is 1 2024-12-16T17:56:52,542 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-16T17:56:52,542 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 99318ad6c4e7b8782230d738424ff705:B, priority=-2147483648, current under compaction store size is 2 2024-12-16T17:56:52,542 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-16T17:56:52,542 DEBUG [RS:0;3609ad07831c:39733-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-16T17:56:52,543 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 99318ad6c4e7b8782230d738424ff705:C, priority=-2147483648, current under compaction store size is 3 2024-12-16T17:56:52,543 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-16T17:56:52,544 DEBUG [RS:0;3609ad07831c:39733-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 40365 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-16T17:56:52,544 DEBUG [RS:0;3609ad07831c:39733-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 37925 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-16T17:56:52,544 DEBUG [RS:0;3609ad07831c:39733-shortCompactions-0 {}] regionserver.HStore(1540): 99318ad6c4e7b8782230d738424ff705/A is initiating minor compaction (all files) 2024-12-16T17:56:52,544 DEBUG [RS:0;3609ad07831c:39733-longCompactions-0 {}] regionserver.HStore(1540): 99318ad6c4e7b8782230d738424ff705/B is initiating minor compaction (all files) 2024-12-16T17:56:52,544 INFO [RS:0;3609ad07831c:39733-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 99318ad6c4e7b8782230d738424ff705/A in TestAcidGuarantees,,1734371794518.99318ad6c4e7b8782230d738424ff705. 2024-12-16T17:56:52,544 INFO [RS:0;3609ad07831c:39733-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 99318ad6c4e7b8782230d738424ff705/B in TestAcidGuarantees,,1734371794518.99318ad6c4e7b8782230d738424ff705. 
2024-12-16T17:56:52,544 INFO [RS:0;3609ad07831c:39733-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/99318ad6c4e7b8782230d738424ff705/A/c44339bde36e4283bf4fe8011da61080, hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/99318ad6c4e7b8782230d738424ff705/A/55b0ce89a0234c42b11cf0d804195104, hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/99318ad6c4e7b8782230d738424ff705/A/e776fdca60a642bd9131d15472b7b62b] into tmpdir=hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/99318ad6c4e7b8782230d738424ff705/.tmp, totalSize=39.4 K 2024-12-16T17:56:52,544 INFO [RS:0;3609ad07831c:39733-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/99318ad6c4e7b8782230d738424ff705/B/184ce2bb40ec45c9af9249134b720dbd, hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/99318ad6c4e7b8782230d738424ff705/B/9ce6a910262e40e89f4a6a64220bfaf4, hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/99318ad6c4e7b8782230d738424ff705/B/cc17a241680b4c68a828deac73e09660] into tmpdir=hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/99318ad6c4e7b8782230d738424ff705/.tmp, totalSize=37.0 K 2024-12-16T17:56:52,544 DEBUG [RS:0;3609ad07831c:39733-shortCompactions-0 {}] compactions.Compactor(224): Compacting c44339bde36e4283bf4fe8011da61080, keycount=150, bloomtype=ROW, size=13.0 K, encoding=NONE, compression=NONE, seqNum=428, earliestPutTs=1734371809142 2024-12-16T17:56:52,544 DEBUG [RS:0;3609ad07831c:39733-longCompactions-0 {}] compactions.Compactor(224): Compacting 184ce2bb40ec45c9af9249134b720dbd, keycount=150, bloomtype=ROW, size=13.0 K, encoding=NONE, compression=NONE, seqNum=428, earliestPutTs=1734371809142 2024-12-16T17:56:52,545 DEBUG [RS:0;3609ad07831c:39733-longCompactions-0 {}] compactions.Compactor(224): Compacting 9ce6a910262e40e89f4a6a64220bfaf4, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=450, earliestPutTs=1734371809781 2024-12-16T17:56:52,545 DEBUG [RS:0;3609ad07831c:39733-shortCompactions-0 {}] compactions.Compactor(224): Compacting 55b0ce89a0234c42b11cf0d804195104, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=450, earliestPutTs=1734371809781 2024-12-16T17:56:52,545 DEBUG [RS:0;3609ad07831c:39733-longCompactions-0 {}] compactions.Compactor(224): Compacting cc17a241680b4c68a828deac73e09660, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=469, earliestPutTs=1734371810931 2024-12-16T17:56:52,545 DEBUG [RS:0;3609ad07831c:39733-shortCompactions-0 {}] compactions.Compactor(224): Compacting e776fdca60a642bd9131d15472b7b62b, keycount=200, bloomtype=ROW, size=14.4 K, encoding=NONE, compression=NONE, seqNum=469, earliestPutTs=1734371810930 2024-12-16T17:56:52,554 INFO [RS:0;3609ad07831c:39733-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 99318ad6c4e7b8782230d738424ff705#A#compaction#99 average throughput is 6.55 MB/second, slept 0 time(s) and 
total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-12-16T17:56:52,555 DEBUG [RS:0;3609ad07831c:39733-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/99318ad6c4e7b8782230d738424ff705/.tmp/A/22305535de4a4ef6b222eed3210ae23d is 50, key is test_row_0/A:col10/1734371810931/Put/seqid=0 2024-12-16T17:56:52,557 INFO [RS:0;3609ad07831c:39733-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 99318ad6c4e7b8782230d738424ff705#B#compaction#100 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-12-16T17:56:52,558 DEBUG [RS:0;3609ad07831c:39733-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/99318ad6c4e7b8782230d738424ff705/.tmp/B/7816c4a6778549efaa240ff5c6249d38 is 50, key is test_row_0/B:col10/1734371810931/Put/seqid=0 2024-12-16T17:56:52,562 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41817 is added to blk_1073741938_1114 (size=13425) 2024-12-16T17:56:52,565 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41817 is added to blk_1073741939_1115 (size=13425) 2024-12-16T17:56:52,598 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 3609ad07831c,39733,1734371789085 2024-12-16T17:56:52,599 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=39733 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=25 2024-12-16T17:56:52,599 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-2 {event_type=RS_FLUSH_REGIONS, pid=25}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1734371794518.99318ad6c4e7b8782230d738424ff705. 
2024-12-16T17:56:52,599 INFO [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-2 {event_type=RS_FLUSH_REGIONS, pid=25}] regionserver.HRegion(2837): Flushing 99318ad6c4e7b8782230d738424ff705 3/3 column families, dataSize=114.05 KB heapSize=299.58 KB 2024-12-16T17:56:52,600 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-2 {event_type=RS_FLUSH_REGIONS, pid=25}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 99318ad6c4e7b8782230d738424ff705, store=A 2024-12-16T17:56:52,600 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-2 {event_type=RS_FLUSH_REGIONS, pid=25}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-16T17:56:52,600 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-2 {event_type=RS_FLUSH_REGIONS, pid=25}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 99318ad6c4e7b8782230d738424ff705, store=B 2024-12-16T17:56:52,600 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-2 {event_type=RS_FLUSH_REGIONS, pid=25}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-16T17:56:52,600 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-2 {event_type=RS_FLUSH_REGIONS, pid=25}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 99318ad6c4e7b8782230d738424ff705, store=C 2024-12-16T17:56:52,600 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-2 {event_type=RS_FLUSH_REGIONS, pid=25}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-16T17:56:52,603 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-2 {event_type=RS_FLUSH_REGIONS, pid=25}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/99318ad6c4e7b8782230d738424ff705/.tmp/A/d8a56d19dc2445e18367ab97dc2ee39d is 50, key is test_row_0/A:col10/1734371811286/Put/seqid=0 2024-12-16T17:56:52,609 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41817 is added to blk_1073741940_1116 (size=12301) 2024-12-16T17:56:52,632 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38367 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=24 2024-12-16T17:56:52,970 DEBUG [RS:0;3609ad07831c:39733-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/99318ad6c4e7b8782230d738424ff705/.tmp/A/22305535de4a4ef6b222eed3210ae23d as hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/99318ad6c4e7b8782230d738424ff705/A/22305535de4a4ef6b222eed3210ae23d 2024-12-16T17:56:52,973 DEBUG [RS:0;3609ad07831c:39733-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/99318ad6c4e7b8782230d738424ff705/.tmp/B/7816c4a6778549efaa240ff5c6249d38 as hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/99318ad6c4e7b8782230d738424ff705/B/7816c4a6778549efaa240ff5c6249d38 2024-12-16T17:56:52,977 INFO [RS:0;3609ad07831c:39733-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 99318ad6c4e7b8782230d738424ff705/A of 99318ad6c4e7b8782230d738424ff705 into 
22305535de4a4ef6b222eed3210ae23d(size=13.1 K), total size for store is 13.1 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-12-16T17:56:52,977 DEBUG [RS:0;3609ad07831c:39733-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 99318ad6c4e7b8782230d738424ff705: 2024-12-16T17:56:52,977 INFO [RS:0;3609ad07831c:39733-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1734371794518.99318ad6c4e7b8782230d738424ff705., storeName=99318ad6c4e7b8782230d738424ff705/A, priority=13, startTime=1734371812542; duration=0sec 2024-12-16T17:56:52,978 DEBUG [RS:0;3609ad07831c:39733-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-16T17:56:52,978 DEBUG [RS:0;3609ad07831c:39733-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 99318ad6c4e7b8782230d738424ff705:A 2024-12-16T17:56:52,978 DEBUG [RS:0;3609ad07831c:39733-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-16T17:56:52,979 DEBUG [RS:0;3609ad07831c:39733-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 37925 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-16T17:56:52,980 DEBUG [RS:0;3609ad07831c:39733-shortCompactions-0 {}] regionserver.HStore(1540): 99318ad6c4e7b8782230d738424ff705/C is initiating minor compaction (all files) 2024-12-16T17:56:52,980 INFO [RS:0;3609ad07831c:39733-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 99318ad6c4e7b8782230d738424ff705/C in TestAcidGuarantees,,1734371794518.99318ad6c4e7b8782230d738424ff705. 2024-12-16T17:56:52,980 INFO [RS:0;3609ad07831c:39733-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/99318ad6c4e7b8782230d738424ff705/C/3feeaffbf50243b39e1ca1814725da0a, hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/99318ad6c4e7b8782230d738424ff705/C/bd400da5893b4fed88ba2f0904f722a4, hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/99318ad6c4e7b8782230d738424ff705/C/0cc3cb5d88244d0bb30edb0f087f7c65] into tmpdir=hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/99318ad6c4e7b8782230d738424ff705/.tmp, totalSize=37.0 K 2024-12-16T17:56:52,980 INFO [RS:0;3609ad07831c:39733-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 99318ad6c4e7b8782230d738424ff705/B of 99318ad6c4e7b8782230d738424ff705 into 7816c4a6778549efaa240ff5c6249d38(size=13.1 K), total size for store is 13.1 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-12-16T17:56:52,980 DEBUG [RS:0;3609ad07831c:39733-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 99318ad6c4e7b8782230d738424ff705: 2024-12-16T17:56:52,981 INFO [RS:0;3609ad07831c:39733-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1734371794518.99318ad6c4e7b8782230d738424ff705., storeName=99318ad6c4e7b8782230d738424ff705/B, priority=13, startTime=1734371812542; duration=0sec 2024-12-16T17:56:52,981 DEBUG [RS:0;3609ad07831c:39733-shortCompactions-0 {}] compactions.Compactor(224): Compacting 3feeaffbf50243b39e1ca1814725da0a, keycount=150, bloomtype=ROW, size=13.0 K, encoding=NONE, compression=NONE, seqNum=428, earliestPutTs=1734371809142 2024-12-16T17:56:52,981 DEBUG [RS:0;3609ad07831c:39733-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-16T17:56:52,981 DEBUG [RS:0;3609ad07831c:39733-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 99318ad6c4e7b8782230d738424ff705:B 2024-12-16T17:56:52,981 DEBUG [RS:0;3609ad07831c:39733-shortCompactions-0 {}] compactions.Compactor(224): Compacting bd400da5893b4fed88ba2f0904f722a4, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=450, earliestPutTs=1734371809781 2024-12-16T17:56:52,982 DEBUG [RS:0;3609ad07831c:39733-shortCompactions-0 {}] compactions.Compactor(224): Compacting 0cc3cb5d88244d0bb30edb0f087f7c65, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=469, earliestPutTs=1734371810931 2024-12-16T17:56:52,991 INFO [RS:0;3609ad07831c:39733-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 99318ad6c4e7b8782230d738424ff705#C#compaction#102 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-12-16T17:56:52,991 DEBUG [RS:0;3609ad07831c:39733-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/99318ad6c4e7b8782230d738424ff705/.tmp/C/a4fa8dd8cee5425297d5db6f589614ed is 50, key is test_row_0/C:col10/1734371810931/Put/seqid=0 2024-12-16T17:56:52,996 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41817 is added to blk_1073741941_1117 (size=13425) 2024-12-16T17:56:53,009 INFO [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-2 {event_type=RS_FLUSH_REGIONS, pid=25}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=38.02 KB at sequenceid=489 (bloomFilter=true), to=hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/99318ad6c4e7b8782230d738424ff705/.tmp/A/d8a56d19dc2445e18367ab97dc2ee39d 2024-12-16T17:56:53,018 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-2 {event_type=RS_FLUSH_REGIONS, pid=25}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/99318ad6c4e7b8782230d738424ff705/.tmp/B/e0ee819ee321472cbcb8781e556b24b3 is 50, key is test_row_0/B:col10/1734371811286/Put/seqid=0 2024-12-16T17:56:53,026 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41817 is added to blk_1073741942_1118 (size=12301) 2024-12-16T17:56:53,027 INFO [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-2 {event_type=RS_FLUSH_REGIONS, pid=25}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=38.02 KB at sequenceid=489 (bloomFilter=true), to=hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/99318ad6c4e7b8782230d738424ff705/.tmp/B/e0ee819ee321472cbcb8781e556b24b3 2024-12-16T17:56:53,035 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-2 {event_type=RS_FLUSH_REGIONS, pid=25}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/99318ad6c4e7b8782230d738424ff705/.tmp/C/311f34a4fd1a4b5a9c2066a6edeb0ea9 is 50, key is test_row_0/C:col10/1734371811286/Put/seqid=0 2024-12-16T17:56:53,042 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41817 is added to blk_1073741943_1119 (size=12301) 2024-12-16T17:56:53,421 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39733 {}] regionserver.HRegion(8581): Flush requested on 99318ad6c4e7b8782230d738424ff705 2024-12-16T17:56:53,421 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1734371794518.99318ad6c4e7b8782230d738424ff705. 
as already flushing 2024-12-16T17:56:53,423 DEBUG [RS:0;3609ad07831c:39733-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/99318ad6c4e7b8782230d738424ff705/.tmp/C/a4fa8dd8cee5425297d5db6f589614ed as hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/99318ad6c4e7b8782230d738424ff705/C/a4fa8dd8cee5425297d5db6f589614ed 2024-12-16T17:56:53,430 INFO [RS:0;3609ad07831c:39733-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 99318ad6c4e7b8782230d738424ff705/C of 99318ad6c4e7b8782230d738424ff705 into a4fa8dd8cee5425297d5db6f589614ed(size=13.1 K), total size for store is 13.1 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-12-16T17:56:53,430 DEBUG [RS:0;3609ad07831c:39733-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 99318ad6c4e7b8782230d738424ff705: 2024-12-16T17:56:53,430 INFO [RS:0;3609ad07831c:39733-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1734371794518.99318ad6c4e7b8782230d738424ff705., storeName=99318ad6c4e7b8782230d738424ff705/C, priority=13, startTime=1734371812542; duration=0sec 2024-12-16T17:56:53,430 DEBUG [RS:0;3609ad07831c:39733-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-16T17:56:53,430 DEBUG [RS:0;3609ad07831c:39733-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 99318ad6c4e7b8782230d738424ff705:C 2024-12-16T17:56:53,432 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39733 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=99318ad6c4e7b8782230d738424ff705, server=3609ad07831c,39733,1734371789085 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-16T17:56:53,432 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39733 {}] ipc.CallRunner(138): callId: 215 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40296 deadline: 1734371873429, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=99318ad6c4e7b8782230d738424ff705, server=3609ad07831c,39733,1734371789085 2024-12-16T17:56:53,433 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39733 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=99318ad6c4e7b8782230d738424ff705, server=3609ad07831c,39733,1734371789085 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-16T17:56:53,433 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39733 {}] ipc.CallRunner(138): callId: 224 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40298 deadline: 1734371873429, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=99318ad6c4e7b8782230d738424ff705, server=3609ad07831c,39733,1734371789085 2024-12-16T17:56:53,433 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39733 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=99318ad6c4e7b8782230d738424ff705, server=3609ad07831c,39733,1734371789085 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-16T17:56:53,433 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39733 {}] ipc.CallRunner(138): callId: 214 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40322 deadline: 1734371873429, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=99318ad6c4e7b8782230d738424ff705, server=3609ad07831c,39733,1734371789085 2024-12-16T17:56:53,434 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39733 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=99318ad6c4e7b8782230d738424ff705, server=3609ad07831c,39733,1734371789085 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-16T17:56:53,434 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39733 {}] ipc.CallRunner(138): callId: 220 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40284 deadline: 1734371873432, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=99318ad6c4e7b8782230d738424ff705, server=3609ad07831c,39733,1734371789085 2024-12-16T17:56:53,443 INFO [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-2 {event_type=RS_FLUSH_REGIONS, pid=25}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=38.02 KB at sequenceid=489 (bloomFilter=true), to=hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/99318ad6c4e7b8782230d738424ff705/.tmp/C/311f34a4fd1a4b5a9c2066a6edeb0ea9 2024-12-16T17:56:53,448 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-2 {event_type=RS_FLUSH_REGIONS, pid=25}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/99318ad6c4e7b8782230d738424ff705/.tmp/A/d8a56d19dc2445e18367ab97dc2ee39d as hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/99318ad6c4e7b8782230d738424ff705/A/d8a56d19dc2445e18367ab97dc2ee39d 2024-12-16T17:56:53,453 INFO [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-2 {event_type=RS_FLUSH_REGIONS, pid=25}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/99318ad6c4e7b8782230d738424ff705/A/d8a56d19dc2445e18367ab97dc2ee39d, entries=150, sequenceid=489, filesize=12.0 K 2024-12-16T17:56:53,455 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-2 {event_type=RS_FLUSH_REGIONS, pid=25}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/99318ad6c4e7b8782230d738424ff705/.tmp/B/e0ee819ee321472cbcb8781e556b24b3 as hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/99318ad6c4e7b8782230d738424ff705/B/e0ee819ee321472cbcb8781e556b24b3 2024-12-16T17:56:53,460 INFO [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-2 {event_type=RS_FLUSH_REGIONS, pid=25}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/99318ad6c4e7b8782230d738424ff705/B/e0ee819ee321472cbcb8781e556b24b3, entries=150, sequenceid=489, filesize=12.0 K 2024-12-16T17:56:53,462 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-2 {event_type=RS_FLUSH_REGIONS, pid=25}] regionserver.HRegionFileSystem(442): Committing 
hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/99318ad6c4e7b8782230d738424ff705/.tmp/C/311f34a4fd1a4b5a9c2066a6edeb0ea9 as hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/99318ad6c4e7b8782230d738424ff705/C/311f34a4fd1a4b5a9c2066a6edeb0ea9 2024-12-16T17:56:53,467 INFO [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-2 {event_type=RS_FLUSH_REGIONS, pid=25}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/99318ad6c4e7b8782230d738424ff705/C/311f34a4fd1a4b5a9c2066a6edeb0ea9, entries=150, sequenceid=489, filesize=12.0 K 2024-12-16T17:56:53,469 INFO [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-2 {event_type=RS_FLUSH_REGIONS, pid=25}] regionserver.HRegion(3040): Finished flush of dataSize ~114.05 KB/116790, heapSize ~299.53 KB/306720, currentSize=87.22 KB/89310 for 99318ad6c4e7b8782230d738424ff705 in 869ms, sequenceid=489, compaction requested=false 2024-12-16T17:56:53,469 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-2 {event_type=RS_FLUSH_REGIONS, pid=25}] regionserver.HRegion(2538): Flush status journal for 99318ad6c4e7b8782230d738424ff705: 2024-12-16T17:56:53,469 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-2 {event_type=RS_FLUSH_REGIONS, pid=25}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1734371794518.99318ad6c4e7b8782230d738424ff705. 2024-12-16T17:56:53,469 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-2 {event_type=RS_FLUSH_REGIONS, pid=25}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=25 2024-12-16T17:56:53,469 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38367 {}] master.HMaster(4106): Remote procedure done, pid=25 2024-12-16T17:56:53,471 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=25, resume processing ppid=24 2024-12-16T17:56:53,472 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=25, ppid=24, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 1.9400 sec 2024-12-16T17:56:53,473 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=24, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=24, table=TestAcidGuarantees in 1.9450 sec 2024-12-16T17:56:53,535 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39733 {}] regionserver.HRegion(8581): Flush requested on 99318ad6c4e7b8782230d738424ff705 2024-12-16T17:56:53,536 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 99318ad6c4e7b8782230d738424ff705 3/3 column families, dataSize=93.93 KB heapSize=246.84 KB 2024-12-16T17:56:53,537 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 99318ad6c4e7b8782230d738424ff705, store=A 2024-12-16T17:56:53,537 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-16T17:56:53,537 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 99318ad6c4e7b8782230d738424ff705, store=B 2024-12-16T17:56:53,538 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-16T17:56:53,538 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 
99318ad6c4e7b8782230d738424ff705, store=C 2024-12-16T17:56:53,538 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-16T17:56:53,543 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/99318ad6c4e7b8782230d738424ff705/.tmp/A/a65dc017545b49239dd9f5d5d36a4d7c is 50, key is test_row_0/A:col10/1734371813536/Put/seqid=0 2024-12-16T17:56:53,552 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39733 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=99318ad6c4e7b8782230d738424ff705, server=3609ad07831c,39733,1734371789085 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-16T17:56:53,552 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39733 {}] ipc.CallRunner(138): callId: 226 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40284 deadline: 1734371873548, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=99318ad6c4e7b8782230d738424ff705, server=3609ad07831c,39733,1734371789085 2024-12-16T17:56:53,554 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39733 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=99318ad6c4e7b8782230d738424ff705, server=3609ad07831c,39733,1734371789085 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-16T17:56:53,555 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39733 {}] ipc.CallRunner(138): callId: 221 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40322 deadline: 1734371873552, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=99318ad6c4e7b8782230d738424ff705, server=3609ad07831c,39733,1734371789085 2024-12-16T17:56:53,555 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39733 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=99318ad6c4e7b8782230d738424ff705, server=3609ad07831c,39733,1734371789085 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-16T17:56:53,555 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39733 {}] ipc.CallRunner(138): callId: 230 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40298 deadline: 1734371873552, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=99318ad6c4e7b8782230d738424ff705, server=3609ad07831c,39733,1734371789085 2024-12-16T17:56:53,555 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39733 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=99318ad6c4e7b8782230d738424ff705, server=3609ad07831c,39733,1734371789085 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-16T17:56:53,555 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39733 {}] ipc.CallRunner(138): callId: 222 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40296 deadline: 1734371873553, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=99318ad6c4e7b8782230d738424ff705, server=3609ad07831c,39733,1734371789085 2024-12-16T17:56:53,556 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41817 is added to blk_1073741944_1120 (size=12301) 2024-12-16T17:56:53,632 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38367 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=24 2024-12-16T17:56:53,633 INFO [Thread-159 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 24 completed 2024-12-16T17:56:53,634 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38367 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-12-16T17:56:53,635 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38367 {}] procedure2.ProcedureExecutor(1098): Stored pid=26, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=26, table=TestAcidGuarantees 2024-12-16T17:56:53,635 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38367 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=26 2024-12-16T17:56:53,636 INFO [PEWorker-3 {}] procedure.FlushTableProcedure(91): pid=26, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=26, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-12-16T17:56:53,636 INFO [PEWorker-3 {}] procedure.FlushTableProcedure(91): pid=26, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=26, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-12-16T17:56:53,636 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=27, ppid=26, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-12-16T17:56:53,654 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39733 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=99318ad6c4e7b8782230d738424ff705, server=3609ad07831c,39733,1734371789085 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-16T17:56:53,654 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39733 {}] ipc.CallRunner(138): callId: 228 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40284 deadline: 1734371873653, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=99318ad6c4e7b8782230d738424ff705, server=3609ad07831c,39733,1734371789085 2024-12-16T17:56:53,657 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39733 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=99318ad6c4e7b8782230d738424ff705, server=3609ad07831c,39733,1734371789085 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-16T17:56:53,657 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39733 {}] ipc.CallRunner(138): callId: 223 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40322 deadline: 1734371873656, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=99318ad6c4e7b8782230d738424ff705, server=3609ad07831c,39733,1734371789085 2024-12-16T17:56:53,658 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39733 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=99318ad6c4e7b8782230d738424ff705, server=3609ad07831c,39733,1734371789085 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-16T17:56:53,658 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39733 {}] ipc.CallRunner(138): callId: 232 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40298 deadline: 1734371873656, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=99318ad6c4e7b8782230d738424ff705, server=3609ad07831c,39733,1734371789085 2024-12-16T17:56:53,658 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39733 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=99318ad6c4e7b8782230d738424ff705, server=3609ad07831c,39733,1734371789085 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-16T17:56:53,658 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39733 {}] ipc.CallRunner(138): callId: 224 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40296 deadline: 1734371873657, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=99318ad6c4e7b8782230d738424ff705, server=3609ad07831c,39733,1734371789085 2024-12-16T17:56:53,736 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38367 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=26 2024-12-16T17:56:53,788 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 3609ad07831c,39733,1734371789085 2024-12-16T17:56:53,789 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=39733 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=27 2024-12-16T17:56:53,789 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-0 {event_type=RS_FLUSH_REGIONS, pid=27}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1734371794518.99318ad6c4e7b8782230d738424ff705. 2024-12-16T17:56:53,789 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-0 {event_type=RS_FLUSH_REGIONS, pid=27}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1734371794518.99318ad6c4e7b8782230d738424ff705. as already flushing 2024-12-16T17:56:53,789 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-0 {event_type=RS_FLUSH_REGIONS, pid=27}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1734371794518.99318ad6c4e7b8782230d738424ff705. 2024-12-16T17:56:53,789 ERROR [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-0 {event_type=RS_FLUSH_REGIONS, pid=27}] handler.RSProcedureHandler(58): pid=27 java.io.IOException: Unable to complete flush {ENCODED => 99318ad6c4e7b8782230d738424ff705, NAME => 'TestAcidGuarantees,,1734371794518.99318ad6c4e7b8782230d738424ff705.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] 
at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-16T17:56:53,789 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-0 {event_type=RS_FLUSH_REGIONS, pid=27}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=27 java.io.IOException: Unable to complete flush {ENCODED => 99318ad6c4e7b8782230d738424ff705, NAME => 'TestAcidGuarantees,,1734371794518.99318ad6c4e7b8782230d738424ff705.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-16T17:56:53,790 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38367 {}] master.HMaster(4114): Remote procedure failed, pid=27 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 99318ad6c4e7b8782230d738424ff705, NAME => 'TestAcidGuarantees,,1734371794518.99318ad6c4e7b8782230d738424ff705.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 99318ad6c4e7b8782230d738424ff705, NAME => 'TestAcidGuarantees,,1734371794518.99318ad6c4e7b8782230d738424ff705.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-16T17:56:53,855 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39733 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=99318ad6c4e7b8782230d738424ff705, server=3609ad07831c,39733,1734371789085 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-16T17:56:53,856 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39733 {}] ipc.CallRunner(138): callId: 230 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40284 deadline: 1734371873855, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=99318ad6c4e7b8782230d738424ff705, server=3609ad07831c,39733,1734371789085 2024-12-16T17:56:53,860 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39733 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=99318ad6c4e7b8782230d738424ff705, server=3609ad07831c,39733,1734371789085 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-16T17:56:53,860 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39733 {}] ipc.CallRunner(138): callId: 225 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40322 deadline: 1734371873858, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=99318ad6c4e7b8782230d738424ff705, server=3609ad07831c,39733,1734371789085 2024-12-16T17:56:53,860 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39733 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=99318ad6c4e7b8782230d738424ff705, server=3609ad07831c,39733,1734371789085 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-16T17:56:53,860 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39733 {}] ipc.CallRunner(138): callId: 226 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40296 deadline: 1734371873859, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=99318ad6c4e7b8782230d738424ff705, server=3609ad07831c,39733,1734371789085 2024-12-16T17:56:53,861 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39733 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=99318ad6c4e7b8782230d738424ff705, server=3609ad07831c,39733,1734371789085 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-16T17:56:53,861 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39733 {}] ipc.CallRunner(138): callId: 234 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40298 deadline: 1734371873860, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=99318ad6c4e7b8782230d738424ff705, server=3609ad07831c,39733,1734371789085 2024-12-16T17:56:53,937 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38367 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=26 2024-12-16T17:56:53,941 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 3609ad07831c,39733,1734371789085 2024-12-16T17:56:53,942 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=39733 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=27 2024-12-16T17:56:53,942 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-1 {event_type=RS_FLUSH_REGIONS, pid=27}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1734371794518.99318ad6c4e7b8782230d738424ff705. 2024-12-16T17:56:53,942 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-1 {event_type=RS_FLUSH_REGIONS, pid=27}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1734371794518.99318ad6c4e7b8782230d738424ff705. as already flushing 2024-12-16T17:56:53,942 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-1 {event_type=RS_FLUSH_REGIONS, pid=27}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1734371794518.99318ad6c4e7b8782230d738424ff705. 2024-12-16T17:56:53,942 ERROR [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-1 {event_type=RS_FLUSH_REGIONS, pid=27}] handler.RSProcedureHandler(58): pid=27 java.io.IOException: Unable to complete flush {ENCODED => 99318ad6c4e7b8782230d738424ff705, NAME => 'TestAcidGuarantees,,1734371794518.99318ad6c4e7b8782230d738424ff705.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] 
at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-16T17:56:53,942 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-1 {event_type=RS_FLUSH_REGIONS, pid=27}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=27 java.io.IOException: Unable to complete flush {ENCODED => 99318ad6c4e7b8782230d738424ff705, NAME => 'TestAcidGuarantees,,1734371794518.99318ad6c4e7b8782230d738424ff705.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-16T17:56:53,943 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38367 {}] master.HMaster(4114): Remote procedure failed, pid=27 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 99318ad6c4e7b8782230d738424ff705, NAME => 'TestAcidGuarantees,,1734371794518.99318ad6c4e7b8782230d738424ff705.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 99318ad6c4e7b8782230d738424ff705, NAME => 'TestAcidGuarantees,,1734371794518.99318ad6c4e7b8782230d738424ff705.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-16T17:56:53,957 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=35.78 KB at sequenceid=511 (bloomFilter=true), to=hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/99318ad6c4e7b8782230d738424ff705/.tmp/A/a65dc017545b49239dd9f5d5d36a4d7c 2024-12-16T17:56:53,965 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/99318ad6c4e7b8782230d738424ff705/.tmp/B/49c8d06ec32143ebbc1bf54bde6b3539 is 50, key is test_row_0/B:col10/1734371813536/Put/seqid=0 2024-12-16T17:56:53,970 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41817 is added to blk_1073741945_1121 (size=12301) 2024-12-16T17:56:53,971 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=35.78 KB at sequenceid=511 (bloomFilter=true), to=hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/99318ad6c4e7b8782230d738424ff705/.tmp/B/49c8d06ec32143ebbc1bf54bde6b3539 2024-12-16T17:56:53,978 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/99318ad6c4e7b8782230d738424ff705/.tmp/C/4661cbf97630442eacae8a9135e0be1a is 50, key is test_row_0/C:col10/1734371813536/Put/seqid=0 2024-12-16T17:56:53,982 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41817 is added to blk_1073741946_1122 (size=12301) 2024-12-16T17:56:54,095 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 3609ad07831c,39733,1734371789085 2024-12-16T17:56:54,095 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=39733 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=27 2024-12-16T17:56:54,096 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-2 {event_type=RS_FLUSH_REGIONS, pid=27}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1734371794518.99318ad6c4e7b8782230d738424ff705. 2024-12-16T17:56:54,096 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-2 {event_type=RS_FLUSH_REGIONS, pid=27}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1734371794518.99318ad6c4e7b8782230d738424ff705. 
as already flushing 2024-12-16T17:56:54,096 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-2 {event_type=RS_FLUSH_REGIONS, pid=27}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1734371794518.99318ad6c4e7b8782230d738424ff705. 2024-12-16T17:56:54,096 ERROR [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-2 {event_type=RS_FLUSH_REGIONS, pid=27}] handler.RSProcedureHandler(58): pid=27 java.io.IOException: Unable to complete flush {ENCODED => 99318ad6c4e7b8782230d738424ff705, NAME => 'TestAcidGuarantees,,1734371794518.99318ad6c4e7b8782230d738424ff705.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-16T17:56:54,096 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-2 {event_type=RS_FLUSH_REGIONS, pid=27}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=27 java.io.IOException: Unable to complete flush {ENCODED => 99318ad6c4e7b8782230d738424ff705, NAME => 'TestAcidGuarantees,,1734371794518.99318ad6c4e7b8782230d738424ff705.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-16T17:56:54,097 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38367 {}] master.HMaster(4114): Remote procedure failed, pid=27 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 99318ad6c4e7b8782230d738424ff705, NAME => 'TestAcidGuarantees,,1734371794518.99318ad6c4e7b8782230d738424ff705.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] 
at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 99318ad6c4e7b8782230d738424ff705, NAME => 'TestAcidGuarantees,,1734371794518.99318ad6c4e7b8782230d738424ff705.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-16T17:56:54,159 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39733 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=99318ad6c4e7b8782230d738424ff705, server=3609ad07831c,39733,1734371789085 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-16T17:56:54,159 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39733 {}] ipc.CallRunner(138): callId: 232 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40284 deadline: 1734371874158, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=99318ad6c4e7b8782230d738424ff705, server=3609ad07831c,39733,1734371789085 2024-12-16T17:56:54,161 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39733 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=99318ad6c4e7b8782230d738424ff705, server=3609ad07831c,39733,1734371789085 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-16T17:56:54,161 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39733 {}] ipc.CallRunner(138): callId: 227 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40322 deadline: 1734371874161, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=99318ad6c4e7b8782230d738424ff705, server=3609ad07831c,39733,1734371789085 2024-12-16T17:56:54,162 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39733 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=99318ad6c4e7b8782230d738424ff705, server=3609ad07831c,39733,1734371789085 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-16T17:56:54,163 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39733 {}] ipc.CallRunner(138): callId: 228 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40296 deadline: 1734371874162, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=99318ad6c4e7b8782230d738424ff705, server=3609ad07831c,39733,1734371789085 2024-12-16T17:56:54,164 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39733 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=99318ad6c4e7b8782230d738424ff705, server=3609ad07831c,39733,1734371789085 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-16T17:56:54,164 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39733 {}] ipc.CallRunner(138): callId: 236 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40298 deadline: 1734371874163, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=99318ad6c4e7b8782230d738424ff705, server=3609ad07831c,39733,1734371789085 2024-12-16T17:56:54,237 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38367 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=26 2024-12-16T17:56:54,248 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 3609ad07831c,39733,1734371789085 2024-12-16T17:56:54,249 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=39733 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=27 2024-12-16T17:56:54,249 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-0 {event_type=RS_FLUSH_REGIONS, pid=27}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1734371794518.99318ad6c4e7b8782230d738424ff705. 2024-12-16T17:56:54,249 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-0 {event_type=RS_FLUSH_REGIONS, pid=27}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1734371794518.99318ad6c4e7b8782230d738424ff705. as already flushing 2024-12-16T17:56:54,249 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-0 {event_type=RS_FLUSH_REGIONS, pid=27}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1734371794518.99318ad6c4e7b8782230d738424ff705. 2024-12-16T17:56:54,249 ERROR [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-0 {event_type=RS_FLUSH_REGIONS, pid=27}] handler.RSProcedureHandler(58): pid=27 java.io.IOException: Unable to complete flush {ENCODED => 99318ad6c4e7b8782230d738424ff705, NAME => 'TestAcidGuarantees,,1734371794518.99318ad6c4e7b8782230d738424ff705.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-16T17:56:54,249 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-0 {event_type=RS_FLUSH_REGIONS, pid=27}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=27 java.io.IOException: Unable to complete flush {ENCODED => 99318ad6c4e7b8782230d738424ff705, NAME => 'TestAcidGuarantees,,1734371794518.99318ad6c4e7b8782230d738424ff705.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-16T17:56:54,250 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38367 {}] master.HMaster(4114): Remote procedure failed, pid=27 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 99318ad6c4e7b8782230d738424ff705, NAME => 'TestAcidGuarantees,,1734371794518.99318ad6c4e7b8782230d738424ff705.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 99318ad6c4e7b8782230d738424ff705, NAME => 'TestAcidGuarantees,,1734371794518.99318ad6c4e7b8782230d738424ff705.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-16T17:56:54,383 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=35.78 KB at sequenceid=511 (bloomFilter=true), to=hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/99318ad6c4e7b8782230d738424ff705/.tmp/C/4661cbf97630442eacae8a9135e0be1a 2024-12-16T17:56:54,388 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/99318ad6c4e7b8782230d738424ff705/.tmp/A/a65dc017545b49239dd9f5d5d36a4d7c as hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/99318ad6c4e7b8782230d738424ff705/A/a65dc017545b49239dd9f5d5d36a4d7c 2024-12-16T17:56:54,400 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 3609ad07831c,39733,1734371789085 2024-12-16T17:56:54,400 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=39733 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=27 2024-12-16T17:56:54,401 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-1 {event_type=RS_FLUSH_REGIONS, pid=27}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1734371794518.99318ad6c4e7b8782230d738424ff705. 2024-12-16T17:56:54,401 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-1 {event_type=RS_FLUSH_REGIONS, pid=27}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1734371794518.99318ad6c4e7b8782230d738424ff705. as already flushing 2024-12-16T17:56:54,401 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-1 {event_type=RS_FLUSH_REGIONS, pid=27}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1734371794518.99318ad6c4e7b8782230d738424ff705. 2024-12-16T17:56:54,401 ERROR [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-1 {event_type=RS_FLUSH_REGIONS, pid=27}] handler.RSProcedureHandler(58): pid=27 java.io.IOException: Unable to complete flush {ENCODED => 99318ad6c4e7b8782230d738424ff705, NAME => 'TestAcidGuarantees,,1734371794518.99318ad6c4e7b8782230d738424ff705.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-16T17:56:54,401 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-1 {event_type=RS_FLUSH_REGIONS, pid=27}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=27 java.io.IOException: Unable to complete flush {ENCODED => 99318ad6c4e7b8782230d738424ff705, NAME => 'TestAcidGuarantees,,1734371794518.99318ad6c4e7b8782230d738424ff705.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-16T17:56:54,401 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38367 {}] master.HMaster(4114): Remote procedure failed, pid=27 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 99318ad6c4e7b8782230d738424ff705, NAME => 'TestAcidGuarantees,,1734371794518.99318ad6c4e7b8782230d738424ff705.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 99318ad6c4e7b8782230d738424ff705, NAME => 'TestAcidGuarantees,,1734371794518.99318ad6c4e7b8782230d738424ff705.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-16T17:56:54,412 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/99318ad6c4e7b8782230d738424ff705/A/a65dc017545b49239dd9f5d5d36a4d7c, entries=150, sequenceid=511, filesize=12.0 K 2024-12-16T17:56:54,413 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/99318ad6c4e7b8782230d738424ff705/.tmp/B/49c8d06ec32143ebbc1bf54bde6b3539 as hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/99318ad6c4e7b8782230d738424ff705/B/49c8d06ec32143ebbc1bf54bde6b3539 2024-12-16T17:56:54,420 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/99318ad6c4e7b8782230d738424ff705/B/49c8d06ec32143ebbc1bf54bde6b3539, entries=150, sequenceid=511, filesize=12.0 K 2024-12-16T17:56:54,420 DEBUG [MemStoreFlusher.0 {}] regionserver.StoreScanner(1000): StoreScanner already closing. There is no need to updateReaders 2024-12-16T17:56:54,421 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/99318ad6c4e7b8782230d738424ff705/.tmp/C/4661cbf97630442eacae8a9135e0be1a as hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/99318ad6c4e7b8782230d738424ff705/C/4661cbf97630442eacae8a9135e0be1a 2024-12-16T17:56:54,427 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/99318ad6c4e7b8782230d738424ff705/C/4661cbf97630442eacae8a9135e0be1a, entries=150, sequenceid=511, filesize=12.0 K 2024-12-16T17:56:54,429 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~107.34 KB/109920, heapSize ~281.95 KB/288720, currentSize=100.63 KB/103050 for 99318ad6c4e7b8782230d738424ff705 in 894ms, sequenceid=511, compaction requested=true 2024-12-16T17:56:54,429 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 99318ad6c4e7b8782230d738424ff705: 2024-12-16T17:56:54,429 DEBUG [RS:0;3609ad07831c:39733-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-16T17:56:54,430 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 99318ad6c4e7b8782230d738424ff705:A, priority=-2147483648, current under compaction store size is 1 2024-12-16T17:56:54,430 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-16T17:56:54,430 DEBUG [RS:0;3609ad07831c:39733-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 
2024-12-16T17:56:54,430 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 99318ad6c4e7b8782230d738424ff705:B, priority=-2147483648, current under compaction store size is 2 2024-12-16T17:56:54,430 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-16T17:56:54,430 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 99318ad6c4e7b8782230d738424ff705:C, priority=-2147483648, current under compaction store size is 3 2024-12-16T17:56:54,430 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-16T17:56:54,431 DEBUG [RS:0;3609ad07831c:39733-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 38027 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-16T17:56:54,431 DEBUG [RS:0;3609ad07831c:39733-shortCompactions-0 {}] regionserver.HStore(1540): 99318ad6c4e7b8782230d738424ff705/A is initiating minor compaction (all files) 2024-12-16T17:56:54,431 DEBUG [RS:0;3609ad07831c:39733-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 38027 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-16T17:56:54,431 INFO [RS:0;3609ad07831c:39733-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 99318ad6c4e7b8782230d738424ff705/A in TestAcidGuarantees,,1734371794518.99318ad6c4e7b8782230d738424ff705. 2024-12-16T17:56:54,431 DEBUG [RS:0;3609ad07831c:39733-longCompactions-0 {}] regionserver.HStore(1540): 99318ad6c4e7b8782230d738424ff705/B is initiating minor compaction (all files) 2024-12-16T17:56:54,431 INFO [RS:0;3609ad07831c:39733-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/99318ad6c4e7b8782230d738424ff705/A/22305535de4a4ef6b222eed3210ae23d, hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/99318ad6c4e7b8782230d738424ff705/A/d8a56d19dc2445e18367ab97dc2ee39d, hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/99318ad6c4e7b8782230d738424ff705/A/a65dc017545b49239dd9f5d5d36a4d7c] into tmpdir=hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/99318ad6c4e7b8782230d738424ff705/.tmp, totalSize=37.1 K 2024-12-16T17:56:54,432 INFO [RS:0;3609ad07831c:39733-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 99318ad6c4e7b8782230d738424ff705/B in TestAcidGuarantees,,1734371794518.99318ad6c4e7b8782230d738424ff705. 
2024-12-16T17:56:54,432 INFO [RS:0;3609ad07831c:39733-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/99318ad6c4e7b8782230d738424ff705/B/7816c4a6778549efaa240ff5c6249d38, hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/99318ad6c4e7b8782230d738424ff705/B/e0ee819ee321472cbcb8781e556b24b3, hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/99318ad6c4e7b8782230d738424ff705/B/49c8d06ec32143ebbc1bf54bde6b3539] into tmpdir=hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/99318ad6c4e7b8782230d738424ff705/.tmp, totalSize=37.1 K 2024-12-16T17:56:54,432 DEBUG [RS:0;3609ad07831c:39733-shortCompactions-0 {}] compactions.Compactor(224): Compacting 22305535de4a4ef6b222eed3210ae23d, keycount=150, bloomtype=ROW, size=13.1 K, encoding=NONE, compression=NONE, seqNum=469, earliestPutTs=1734371810931 2024-12-16T17:56:54,432 DEBUG [RS:0;3609ad07831c:39733-longCompactions-0 {}] compactions.Compactor(224): Compacting 7816c4a6778549efaa240ff5c6249d38, keycount=150, bloomtype=ROW, size=13.1 K, encoding=NONE, compression=NONE, seqNum=469, earliestPutTs=1734371810931 2024-12-16T17:56:54,432 DEBUG [RS:0;3609ad07831c:39733-shortCompactions-0 {}] compactions.Compactor(224): Compacting d8a56d19dc2445e18367ab97dc2ee39d, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=489, earliestPutTs=1734371811283 2024-12-16T17:56:54,433 DEBUG [RS:0;3609ad07831c:39733-longCompactions-0 {}] compactions.Compactor(224): Compacting e0ee819ee321472cbcb8781e556b24b3, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=489, earliestPutTs=1734371811283 2024-12-16T17:56:54,433 DEBUG [RS:0;3609ad07831c:39733-shortCompactions-0 {}] compactions.Compactor(224): Compacting a65dc017545b49239dd9f5d5d36a4d7c, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=511, earliestPutTs=1734371813431 2024-12-16T17:56:54,433 DEBUG [RS:0;3609ad07831c:39733-longCompactions-0 {}] compactions.Compactor(224): Compacting 49c8d06ec32143ebbc1bf54bde6b3539, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=511, earliestPutTs=1734371813431 2024-12-16T17:56:54,441 INFO [RS:0;3609ad07831c:39733-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 99318ad6c4e7b8782230d738424ff705#A#compaction#108 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 1 active operations remaining, total limit is 50.00 MB/second 2024-12-16T17:56:54,442 INFO [RS:0;3609ad07831c:39733-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 99318ad6c4e7b8782230d738424ff705#B#compaction#109 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-12-16T17:56:54,442 DEBUG [RS:0;3609ad07831c:39733-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/99318ad6c4e7b8782230d738424ff705/.tmp/A/3c51b307791849649a011744c2138b66 is 50, key is test_row_0/A:col10/1734371813536/Put/seqid=0 2024-12-16T17:56:54,442 DEBUG [RS:0;3609ad07831c:39733-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/99318ad6c4e7b8782230d738424ff705/.tmp/B/347a150d1a5944efb7db030c4b607378 is 50, key is test_row_0/B:col10/1734371813536/Put/seqid=0 2024-12-16T17:56:54,450 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41817 is added to blk_1073741947_1123 (size=13527) 2024-12-16T17:56:54,451 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41817 is added to blk_1073741948_1124 (size=13527) 2024-12-16T17:56:54,553 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 3609ad07831c,39733,1734371789085 2024-12-16T17:56:54,553 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=39733 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=27 2024-12-16T17:56:54,553 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-2 {event_type=RS_FLUSH_REGIONS, pid=27}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1734371794518.99318ad6c4e7b8782230d738424ff705. 
2024-12-16T17:56:54,553 INFO [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-2 {event_type=RS_FLUSH_REGIONS, pid=27}] regionserver.HRegion(2837): Flushing 99318ad6c4e7b8782230d738424ff705 3/3 column families, dataSize=100.63 KB heapSize=264.42 KB 2024-12-16T17:56:54,554 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-2 {event_type=RS_FLUSH_REGIONS, pid=27}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 99318ad6c4e7b8782230d738424ff705, store=A 2024-12-16T17:56:54,554 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-2 {event_type=RS_FLUSH_REGIONS, pid=27}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-16T17:56:54,554 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-2 {event_type=RS_FLUSH_REGIONS, pid=27}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 99318ad6c4e7b8782230d738424ff705, store=B 2024-12-16T17:56:54,554 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-2 {event_type=RS_FLUSH_REGIONS, pid=27}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-16T17:56:54,554 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-2 {event_type=RS_FLUSH_REGIONS, pid=27}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 99318ad6c4e7b8782230d738424ff705, store=C 2024-12-16T17:56:54,554 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-2 {event_type=RS_FLUSH_REGIONS, pid=27}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-16T17:56:54,558 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-2 {event_type=RS_FLUSH_REGIONS, pid=27}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/99318ad6c4e7b8782230d738424ff705/.tmp/A/82277bc615374b70af9ebed53ef103c5 is 50, key is test_row_0/A:col10/1734371813551/Put/seqid=0 2024-12-16T17:56:54,563 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41817 is added to blk_1073741949_1125 (size=12301) 2024-12-16T17:56:54,663 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39733 {}] regionserver.HRegion(8581): Flush requested on 99318ad6c4e7b8782230d738424ff705 2024-12-16T17:56:54,663 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1734371794518.99318ad6c4e7b8782230d738424ff705. as already flushing 2024-12-16T17:56:54,676 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39733 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=99318ad6c4e7b8782230d738424ff705, server=3609ad07831c,39733,1734371789085 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-16T17:56:54,676 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39733 {}] ipc.CallRunner(138): callId: 233 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40296 deadline: 1734371874675, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=99318ad6c4e7b8782230d738424ff705, server=3609ad07831c,39733,1734371789085 2024-12-16T17:56:54,677 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39733 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=99318ad6c4e7b8782230d738424ff705, server=3609ad07831c,39733,1734371789085 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-16T17:56:54,677 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39733 {}] ipc.CallRunner(138): callId: 234 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40322 deadline: 1734371874675, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=99318ad6c4e7b8782230d738424ff705, server=3609ad07831c,39733,1734371789085 2024-12-16T17:56:54,678 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39733 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=99318ad6c4e7b8782230d738424ff705, server=3609ad07831c,39733,1734371789085 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-16T17:56:54,678 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39733 {}] ipc.CallRunner(138): callId: 241 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40298 deadline: 1734371874676, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=99318ad6c4e7b8782230d738424ff705, server=3609ad07831c,39733,1734371789085 2024-12-16T17:56:54,678 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39733 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=99318ad6c4e7b8782230d738424ff705, server=3609ad07831c,39733,1734371789085 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-16T17:56:54,678 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39733 {}] ipc.CallRunner(138): callId: 238 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40284 deadline: 1734371874676, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=99318ad6c4e7b8782230d738424ff705, server=3609ad07831c,39733,1734371789085 2024-12-16T17:56:54,738 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38367 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=26 2024-12-16T17:56:54,778 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39733 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=99318ad6c4e7b8782230d738424ff705, server=3609ad07831c,39733,1734371789085 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-16T17:56:54,778 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39733 {}] ipc.CallRunner(138): callId: 236 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40322 deadline: 1734371874778, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=99318ad6c4e7b8782230d738424ff705, server=3609ad07831c,39733,1734371789085 2024-12-16T17:56:54,778 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39733 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=99318ad6c4e7b8782230d738424ff705, server=3609ad07831c,39733,1734371789085 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-16T17:56:54,778 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39733 {}] ipc.CallRunner(138): callId: 235 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40296 deadline: 1734371874778, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=99318ad6c4e7b8782230d738424ff705, server=3609ad07831c,39733,1734371789085 2024-12-16T17:56:54,780 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39733 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=99318ad6c4e7b8782230d738424ff705, server=3609ad07831c,39733,1734371789085 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-16T17:56:54,780 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39733 {}] ipc.CallRunner(138): callId: 243 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40298 deadline: 1734371874779, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=99318ad6c4e7b8782230d738424ff705, server=3609ad07831c,39733,1734371789085 2024-12-16T17:56:54,781 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39733 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=99318ad6c4e7b8782230d738424ff705, server=3609ad07831c,39733,1734371789085 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-16T17:56:54,781 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39733 {}] ipc.CallRunner(138): callId: 240 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40284 deadline: 1734371874779, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=99318ad6c4e7b8782230d738424ff705, server=3609ad07831c,39733,1734371789085 2024-12-16T17:56:54,856 DEBUG [RS:0;3609ad07831c:39733-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/99318ad6c4e7b8782230d738424ff705/.tmp/B/347a150d1a5944efb7db030c4b607378 as hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/99318ad6c4e7b8782230d738424ff705/B/347a150d1a5944efb7db030c4b607378 2024-12-16T17:56:54,858 DEBUG [RS:0;3609ad07831c:39733-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/99318ad6c4e7b8782230d738424ff705/.tmp/A/3c51b307791849649a011744c2138b66 as hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/99318ad6c4e7b8782230d738424ff705/A/3c51b307791849649a011744c2138b66 2024-12-16T17:56:54,864 INFO [RS:0;3609ad07831c:39733-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 99318ad6c4e7b8782230d738424ff705/A of 99318ad6c4e7b8782230d738424ff705 into 3c51b307791849649a011744c2138b66(size=13.2 K), total size for store is 13.2 K. This selection was in queue for 0sec, and took 0sec to execute. 
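The repeated RegionTooBusyException warnings above come from HRegion.checkResources rejecting writes while the region's memstore sits above its blocking size, which is the memstore flush size multiplied by the block multiplier. The 512.0 K limit reported in this run is consistent with a test configuration that shrinks the flush size (for example 128 K with the default multiplier of 4); the exact values used by this test are an assumption, not something stated in the log. A minimal sketch of how such a limit could be configured:

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;

    public class MemstoreLimitConfig {
        public static Configuration create() {
            Configuration conf = HBaseConfiguration.create();
            // Assumed test-style value: flush each memstore at 128 K instead of the default.
            conf.setLong("hbase.hregion.memstore.flush.size", 128 * 1024);
            // Block new writes once the memstore reaches 4x the flush size (512 K here),
            // which is the limit reported in the RegionTooBusyException messages above.
            conf.setInt("hbase.hregion.memstore.block.multiplier", 4);
            return conf;
        }
    }

Writers that hit this limit are expected to back off and retry, which is why the same mutations reappear with later deadlines; the earlier "NOT flushing ... as already flushing" line shows the flusher declining a second flush request for the region while the first flush is still in progress.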
2024-12-16T17:56:54,864 DEBUG [RS:0;3609ad07831c:39733-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 99318ad6c4e7b8782230d738424ff705: 2024-12-16T17:56:54,864 INFO [RS:0;3609ad07831c:39733-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1734371794518.99318ad6c4e7b8782230d738424ff705., storeName=99318ad6c4e7b8782230d738424ff705/A, priority=13, startTime=1734371814429; duration=0sec 2024-12-16T17:56:54,865 DEBUG [RS:0;3609ad07831c:39733-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-16T17:56:54,865 DEBUG [RS:0;3609ad07831c:39733-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 99318ad6c4e7b8782230d738424ff705:A 2024-12-16T17:56:54,865 DEBUG [RS:0;3609ad07831c:39733-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-16T17:56:54,866 DEBUG [RS:0;3609ad07831c:39733-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 38027 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-16T17:56:54,866 DEBUG [RS:0;3609ad07831c:39733-shortCompactions-0 {}] regionserver.HStore(1540): 99318ad6c4e7b8782230d738424ff705/C is initiating minor compaction (all files) 2024-12-16T17:56:54,867 INFO [RS:0;3609ad07831c:39733-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 99318ad6c4e7b8782230d738424ff705/C in TestAcidGuarantees,,1734371794518.99318ad6c4e7b8782230d738424ff705. 2024-12-16T17:56:54,867 INFO [RS:0;3609ad07831c:39733-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/99318ad6c4e7b8782230d738424ff705/C/a4fa8dd8cee5425297d5db6f589614ed, hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/99318ad6c4e7b8782230d738424ff705/C/311f34a4fd1a4b5a9c2066a6edeb0ea9, hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/99318ad6c4e7b8782230d738424ff705/C/4661cbf97630442eacae8a9135e0be1a] into tmpdir=hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/99318ad6c4e7b8782230d738424ff705/.tmp, totalSize=37.1 K 2024-12-16T17:56:54,867 INFO [RS:0;3609ad07831c:39733-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 99318ad6c4e7b8782230d738424ff705/B of 99318ad6c4e7b8782230d738424ff705 into 347a150d1a5944efb7db030c4b607378(size=13.2 K), total size for store is 13.2 K. This selection was in queue for 0sec, and took 0sec to execute. 
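The "Exploring compaction algorithm has selected 3 files of size 38027 ... with 1 in ratio" line reflects the core test the policy applies: a candidate set is only eligible when every file is no larger than the configured ratio times the combined size of the other files in the set. The sketch below is a simplified illustration of that check, not HBase's actual ExploringCompactionPolicy code; the ratio value and the file sizes are assumptions chosen to roughly match the 13.1 K and 12.0 K store files listed above.

    // Simplified illustration of the "in ratio" test used when exploring candidate
    // compaction selections: every file must be <= ratio * (sum of the other files).
    public class FilesInRatio {
        static boolean filesInRatio(long[] fileSizes, double ratio) {
            long total = 0;
            for (long size : fileSizes) {
                total += size;
            }
            for (long size : fileSizes) {
                if (size > (total - size) * ratio) {
                    return false;
                }
            }
            return true;
        }

        public static void main(String[] args) {
            // Roughly 13.1 K + 12.0 K + 12.0 K, comparable to the ~37 K selection above;
            // with the assumed default ratio of 1.2 the whole set passes the check.
            long[] sizes = {13400, 12300, 12300};
            System.out.println(filesInRatio(sizes, 1.2));
        }
    }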
2024-12-16T17:56:54,867 DEBUG [RS:0;3609ad07831c:39733-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 99318ad6c4e7b8782230d738424ff705: 2024-12-16T17:56:54,867 INFO [RS:0;3609ad07831c:39733-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1734371794518.99318ad6c4e7b8782230d738424ff705., storeName=99318ad6c4e7b8782230d738424ff705/B, priority=13, startTime=1734371814430; duration=0sec 2024-12-16T17:56:54,867 DEBUG [RS:0;3609ad07831c:39733-shortCompactions-0 {}] compactions.Compactor(224): Compacting a4fa8dd8cee5425297d5db6f589614ed, keycount=150, bloomtype=ROW, size=13.1 K, encoding=NONE, compression=NONE, seqNum=469, earliestPutTs=1734371810931 2024-12-16T17:56:54,868 DEBUG [RS:0;3609ad07831c:39733-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-16T17:56:54,868 DEBUG [RS:0;3609ad07831c:39733-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 99318ad6c4e7b8782230d738424ff705:B 2024-12-16T17:56:54,868 DEBUG [RS:0;3609ad07831c:39733-shortCompactions-0 {}] compactions.Compactor(224): Compacting 311f34a4fd1a4b5a9c2066a6edeb0ea9, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=489, earliestPutTs=1734371811283 2024-12-16T17:56:54,868 DEBUG [RS:0;3609ad07831c:39733-shortCompactions-0 {}] compactions.Compactor(224): Compacting 4661cbf97630442eacae8a9135e0be1a, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=511, earliestPutTs=1734371813431 2024-12-16T17:56:54,875 INFO [RS:0;3609ad07831c:39733-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 99318ad6c4e7b8782230d738424ff705#C#compaction#111 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-12-16T17:56:54,876 DEBUG [RS:0;3609ad07831c:39733-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/99318ad6c4e7b8782230d738424ff705/.tmp/C/8a57b1f4eb3643de96ef527efc73b5a7 is 50, key is test_row_0/C:col10/1734371813536/Put/seqid=0 2024-12-16T17:56:54,880 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41817 is added to blk_1073741950_1126 (size=13527) 2024-12-16T17:56:54,887 DEBUG [RS:0;3609ad07831c:39733-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/99318ad6c4e7b8782230d738424ff705/.tmp/C/8a57b1f4eb3643de96ef527efc73b5a7 as hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/99318ad6c4e7b8782230d738424ff705/C/8a57b1f4eb3643de96ef527efc73b5a7 2024-12-16T17:56:54,892 INFO [RS:0;3609ad07831c:39733-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 99318ad6c4e7b8782230d738424ff705/C of 99318ad6c4e7b8782230d738424ff705 into 8a57b1f4eb3643de96ef527efc73b5a7(size=13.2 K), total size for store is 13.2 K. This selection was in queue for 0sec, and took 0sec to execute. 
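The flush output interleaved with these compactions is the region-server side of the master-coordinated FlushTableProcedure, whose progress the client keeps polling (the recurring "Checking to see if procedure is done pid=26" lines). A minimal sketch of issuing such a flush from a client, assuming a table named TestAcidGuarantees on a locally configured cluster and that this build routes Admin.flush through the master procedure as the log suggests:

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;

    public class FlushTableExample {
        public static void main(String[] args) throws Exception {
            Configuration conf = HBaseConfiguration.create();
            try (Connection connection = ConnectionFactory.createConnection(conf);
                 Admin admin = connection.getAdmin()) {
                // Asks the master to flush every region of the table; the call returns
                // once the flush procedure completes, while the client periodically asks
                // the master whether the procedure is done.
                admin.flush(TableName.valueOf("TestAcidGuarantees"));
            }
        }
    }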
2024-12-16T17:56:54,892 DEBUG [RS:0;3609ad07831c:39733-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 99318ad6c4e7b8782230d738424ff705: 2024-12-16T17:56:54,892 INFO [RS:0;3609ad07831c:39733-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1734371794518.99318ad6c4e7b8782230d738424ff705., storeName=99318ad6c4e7b8782230d738424ff705/C, priority=13, startTime=1734371814430; duration=0sec 2024-12-16T17:56:54,893 DEBUG [RS:0;3609ad07831c:39733-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-16T17:56:54,893 DEBUG [RS:0;3609ad07831c:39733-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 99318ad6c4e7b8782230d738424ff705:C 2024-12-16T17:56:54,964 INFO [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-2 {event_type=RS_FLUSH_REGIONS, pid=27}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=33.54 KB at sequenceid=529 (bloomFilter=true), to=hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/99318ad6c4e7b8782230d738424ff705/.tmp/A/82277bc615374b70af9ebed53ef103c5 2024-12-16T17:56:54,971 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-2 {event_type=RS_FLUSH_REGIONS, pid=27}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/99318ad6c4e7b8782230d738424ff705/.tmp/B/3086dfc92c4146889b152ba4d62e4432 is 50, key is test_row_0/B:col10/1734371813551/Put/seqid=0 2024-12-16T17:56:54,978 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41817 is added to blk_1073741951_1127 (size=12301) 2024-12-16T17:56:54,980 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39733 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=99318ad6c4e7b8782230d738424ff705, server=3609ad07831c,39733,1734371789085 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-16T17:56:54,980 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39733 {}] ipc.CallRunner(138): callId: 237 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40296 deadline: 1734371874979, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=99318ad6c4e7b8782230d738424ff705, server=3609ad07831c,39733,1734371789085 2024-12-16T17:56:54,980 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39733 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=99318ad6c4e7b8782230d738424ff705, server=3609ad07831c,39733,1734371789085 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-16T17:56:54,980 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39733 {}] ipc.CallRunner(138): callId: 238 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40322 deadline: 1734371874980, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=99318ad6c4e7b8782230d738424ff705, server=3609ad07831c,39733,1734371789085 2024-12-16T17:56:54,982 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39733 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=99318ad6c4e7b8782230d738424ff705, server=3609ad07831c,39733,1734371789085 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-16T17:56:54,982 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39733 {}] ipc.CallRunner(138): callId: 245 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40298 deadline: 1734371874981, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=99318ad6c4e7b8782230d738424ff705, server=3609ad07831c,39733,1734371789085 2024-12-16T17:56:54,984 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39733 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=99318ad6c4e7b8782230d738424ff705, server=3609ad07831c,39733,1734371789085 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-16T17:56:54,984 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39733 {}] ipc.CallRunner(138): callId: 242 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40284 deadline: 1734371874982, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=99318ad6c4e7b8782230d738424ff705, server=3609ad07831c,39733,1734371789085 2024-12-16T17:56:55,283 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39733 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=99318ad6c4e7b8782230d738424ff705, server=3609ad07831c,39733,1734371789085 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-16T17:56:55,284 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39733 {}] ipc.CallRunner(138): callId: 239 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40296 deadline: 1734371875282, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=99318ad6c4e7b8782230d738424ff705, server=3609ad07831c,39733,1734371789085 2024-12-16T17:56:55,284 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39733 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=99318ad6c4e7b8782230d738424ff705, server=3609ad07831c,39733,1734371789085 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-16T17:56:55,284 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39733 {}] ipc.CallRunner(138): callId: 240 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40322 deadline: 1734371875283, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=99318ad6c4e7b8782230d738424ff705, server=3609ad07831c,39733,1734371789085 2024-12-16T17:56:55,285 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39733 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=99318ad6c4e7b8782230d738424ff705, server=3609ad07831c,39733,1734371789085 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-16T17:56:55,285 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39733 {}] ipc.CallRunner(138): callId: 247 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40298 deadline: 1734371875284, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=99318ad6c4e7b8782230d738424ff705, server=3609ad07831c,39733,1734371789085 2024-12-16T17:56:55,288 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39733 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=99318ad6c4e7b8782230d738424ff705, server=3609ad07831c,39733,1734371789085 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-16T17:56:55,289 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39733 {}] ipc.CallRunner(138): callId: 244 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40284 deadline: 1734371875287, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=99318ad6c4e7b8782230d738424ff705, server=3609ad07831c,39733,1734371789085 2024-12-16T17:56:55,379 INFO [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-2 {event_type=RS_FLUSH_REGIONS, pid=27}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=33.54 KB at sequenceid=529 (bloomFilter=true), to=hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/99318ad6c4e7b8782230d738424ff705/.tmp/B/3086dfc92c4146889b152ba4d62e4432 2024-12-16T17:56:55,387 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-2 {event_type=RS_FLUSH_REGIONS, pid=27}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/99318ad6c4e7b8782230d738424ff705/.tmp/C/c08600e8e7aa42b1b63dcff6a65f10f0 is 50, key is test_row_0/C:col10/1734371813551/Put/seqid=0 2024-12-16T17:56:55,391 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41817 is added to blk_1073741952_1128 (size=12301) 2024-12-16T17:56:55,739 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38367 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=26 2024-12-16T17:56:55,789 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39733 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=99318ad6c4e7b8782230d738424ff705, server=3609ad07831c,39733,1734371789085 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-16T17:56:55,789 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39733 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=99318ad6c4e7b8782230d738424ff705, server=3609ad07831c,39733,1734371789085 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-16T17:56:55,789 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39733 {}] ipc.CallRunner(138): callId: 241 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40296 deadline: 1734371875788, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=99318ad6c4e7b8782230d738424ff705, server=3609ad07831c,39733,1734371789085 2024-12-16T17:56:55,789 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39733 {}] ipc.CallRunner(138): callId: 249 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40298 deadline: 1734371875788, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=99318ad6c4e7b8782230d738424ff705, server=3609ad07831c,39733,1734371789085 2024-12-16T17:56:55,789 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39733 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=99318ad6c4e7b8782230d738424ff705, server=3609ad07831c,39733,1734371789085 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-16T17:56:55,790 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39733 {}] ipc.CallRunner(138): callId: 242 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40322 deadline: 1734371875788, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=99318ad6c4e7b8782230d738424ff705, server=3609ad07831c,39733,1734371789085 2024-12-16T17:56:55,791 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39733 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=99318ad6c4e7b8782230d738424ff705, server=3609ad07831c,39733,1734371789085 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-16T17:56:55,791 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39733 {}] ipc.CallRunner(138): callId: 246 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40284 deadline: 1734371875790, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=99318ad6c4e7b8782230d738424ff705, server=3609ad07831c,39733,1734371789085 2024-12-16T17:56:55,792 INFO [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-2 {event_type=RS_FLUSH_REGIONS, pid=27}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=33.54 KB at sequenceid=529 (bloomFilter=true), to=hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/99318ad6c4e7b8782230d738424ff705/.tmp/C/c08600e8e7aa42b1b63dcff6a65f10f0 2024-12-16T17:56:55,798 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-2 {event_type=RS_FLUSH_REGIONS, pid=27}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/99318ad6c4e7b8782230d738424ff705/.tmp/A/82277bc615374b70af9ebed53ef103c5 as hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/99318ad6c4e7b8782230d738424ff705/A/82277bc615374b70af9ebed53ef103c5 2024-12-16T17:56:55,803 INFO [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-2 {event_type=RS_FLUSH_REGIONS, pid=27}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/99318ad6c4e7b8782230d738424ff705/A/82277bc615374b70af9ebed53ef103c5, entries=150, sequenceid=529, filesize=12.0 K 2024-12-16T17:56:55,804 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-2 {event_type=RS_FLUSH_REGIONS, pid=27}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/99318ad6c4e7b8782230d738424ff705/.tmp/B/3086dfc92c4146889b152ba4d62e4432 as hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/99318ad6c4e7b8782230d738424ff705/B/3086dfc92c4146889b152ba4d62e4432 2024-12-16T17:56:55,809 INFO [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-2 {event_type=RS_FLUSH_REGIONS, pid=27}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/99318ad6c4e7b8782230d738424ff705/B/3086dfc92c4146889b152ba4d62e4432, entries=150, sequenceid=529, filesize=12.0 K 2024-12-16T17:56:55,810 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-2 {event_type=RS_FLUSH_REGIONS, pid=27}] regionserver.HRegionFileSystem(442): Committing 
hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/99318ad6c4e7b8782230d738424ff705/.tmp/C/c08600e8e7aa42b1b63dcff6a65f10f0 as hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/99318ad6c4e7b8782230d738424ff705/C/c08600e8e7aa42b1b63dcff6a65f10f0 2024-12-16T17:56:55,815 INFO [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-2 {event_type=RS_FLUSH_REGIONS, pid=27}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/99318ad6c4e7b8782230d738424ff705/C/c08600e8e7aa42b1b63dcff6a65f10f0, entries=150, sequenceid=529, filesize=12.0 K 2024-12-16T17:56:55,816 INFO [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-2 {event_type=RS_FLUSH_REGIONS, pid=27}] regionserver.HRegion(3040): Finished flush of dataSize ~100.63 KB/103050, heapSize ~264.38 KB/270720, currentSize=100.63 KB/103050 for 99318ad6c4e7b8782230d738424ff705 in 1263ms, sequenceid=529, compaction requested=false 2024-12-16T17:56:55,816 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-2 {event_type=RS_FLUSH_REGIONS, pid=27}] regionserver.HRegion(2538): Flush status journal for 99318ad6c4e7b8782230d738424ff705: 2024-12-16T17:56:55,816 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-2 {event_type=RS_FLUSH_REGIONS, pid=27}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1734371794518.99318ad6c4e7b8782230d738424ff705. 2024-12-16T17:56:55,816 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-2 {event_type=RS_FLUSH_REGIONS, pid=27}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=27 2024-12-16T17:56:55,817 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38367 {}] master.HMaster(4106): Remote procedure done, pid=27 2024-12-16T17:56:55,819 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=27, resume processing ppid=26 2024-12-16T17:56:55,819 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=27, ppid=26, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 2.1820 sec 2024-12-16T17:56:55,820 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=26, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=26, table=TestAcidGuarantees in 2.1850 sec 2024-12-16T17:56:56,174 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39733 {}] regionserver.HRegion(8581): Flush requested on 99318ad6c4e7b8782230d738424ff705 2024-12-16T17:56:56,174 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 99318ad6c4e7b8782230d738424ff705 3/3 column families, dataSize=107.34 KB heapSize=282 KB 2024-12-16T17:56:56,174 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 99318ad6c4e7b8782230d738424ff705, store=A 2024-12-16T17:56:56,174 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-16T17:56:56,174 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 99318ad6c4e7b8782230d738424ff705, store=B 2024-12-16T17:56:56,174 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-16T17:56:56,174 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 
99318ad6c4e7b8782230d738424ff705, store=C 2024-12-16T17:56:56,175 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-16T17:56:56,179 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/99318ad6c4e7b8782230d738424ff705/.tmp/A/871e6af9a0274026b78bdae4fbeea583 is 50, key is test_row_0/A:col10/1734371814675/Put/seqid=0 2024-12-16T17:56:56,194 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41817 is added to blk_1073741953_1129 (size=14741) 2024-12-16T17:56:56,204 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39733 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=99318ad6c4e7b8782230d738424ff705, server=3609ad07831c,39733,1734371789085 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-16T17:56:56,204 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39733 {}] ipc.CallRunner(138): callId: 194 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40334 deadline: 1734371876202, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=99318ad6c4e7b8782230d738424ff705, server=3609ad07831c,39733,1734371789085 2024-12-16T17:56:56,306 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39733 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=99318ad6c4e7b8782230d738424ff705, server=3609ad07831c,39733,1734371789085 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-16T17:56:56,306 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39733 {}] ipc.CallRunner(138): callId: 196 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40334 deadline: 1734371876305, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=99318ad6c4e7b8782230d738424ff705, server=3609ad07831c,39733,1734371789085 2024-12-16T17:56:56,509 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39733 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=99318ad6c4e7b8782230d738424ff705, server=3609ad07831c,39733,1734371789085 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-16T17:56:56,509 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39733 {}] ipc.CallRunner(138): callId: 198 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40334 deadline: 1734371876508, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=99318ad6c4e7b8782230d738424ff705, server=3609ad07831c,39733,1734371789085 2024-12-16T17:56:56,595 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=35.78 KB at sequenceid=551 (bloomFilter=true), to=hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/99318ad6c4e7b8782230d738424ff705/.tmp/A/871e6af9a0274026b78bdae4fbeea583 2024-12-16T17:56:56,604 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/99318ad6c4e7b8782230d738424ff705/.tmp/B/08244e1936b346be9b07d80e3fe03b03 is 50, key is test_row_0/B:col10/1734371814675/Put/seqid=0 2024-12-16T17:56:56,608 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41817 is added to blk_1073741954_1130 (size=12301) 2024-12-16T17:56:56,609 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=35.78 KB at sequenceid=551 (bloomFilter=true), to=hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/99318ad6c4e7b8782230d738424ff705/.tmp/B/08244e1936b346be9b07d80e3fe03b03 2024-12-16T17:56:56,617 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/99318ad6c4e7b8782230d738424ff705/.tmp/C/c876297b34a7434fbc7ecb15800231c2 is 50, key is test_row_0/C:col10/1734371814675/Put/seqid=0 2024-12-16T17:56:56,621 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41817 is added to blk_1073741955_1131 (size=12301) 2024-12-16T17:56:56,794 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39733 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=99318ad6c4e7b8782230d738424ff705, server=3609ad07831c,39733,1734371789085 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-16T17:56:56,795 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39733 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=99318ad6c4e7b8782230d738424ff705, server=3609ad07831c,39733,1734371789085 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-16T17:56:56,795 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39733 {}] ipc.CallRunner(138): callId: 251 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40298 deadline: 1734371876793, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=99318ad6c4e7b8782230d738424ff705, server=3609ad07831c,39733,1734371789085 2024-12-16T17:56:56,795 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39733 {}] ipc.CallRunner(138): callId: 243 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40296 deadline: 1734371876794, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=99318ad6c4e7b8782230d738424ff705, server=3609ad07831c,39733,1734371789085 2024-12-16T17:56:56,798 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39733 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=99318ad6c4e7b8782230d738424ff705, server=3609ad07831c,39733,1734371789085 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-16T17:56:56,798 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39733 {}] ipc.CallRunner(138): callId: 244 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40322 deadline: 1734371876797, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=99318ad6c4e7b8782230d738424ff705, server=3609ad07831c,39733,1734371789085 2024-12-16T17:56:56,800 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39733 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=99318ad6c4e7b8782230d738424ff705, server=3609ad07831c,39733,1734371789085 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-16T17:56:56,800 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39733 {}] ipc.CallRunner(138): callId: 248 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40284 deadline: 1734371876798, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=99318ad6c4e7b8782230d738424ff705, server=3609ad07831c,39733,1734371789085 2024-12-16T17:56:56,812 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39733 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=99318ad6c4e7b8782230d738424ff705, server=3609ad07831c,39733,1734371789085 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-16T17:56:56,812 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39733 {}] ipc.CallRunner(138): callId: 200 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40334 deadline: 1734371876811, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=99318ad6c4e7b8782230d738424ff705, server=3609ad07831c,39733,1734371789085 2024-12-16T17:56:56,848 DEBUG [Thread-160 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x2c4dd458 to 127.0.0.1:49190 2024-12-16T17:56:56,848 DEBUG [Thread-160 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-16T17:56:56,849 DEBUG [Thread-162 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x51254994 to 127.0.0.1:49190 2024-12-16T17:56:56,849 DEBUG [Thread-162 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-16T17:56:56,849 DEBUG [Thread-166 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x00a9bab0 to 127.0.0.1:49190 2024-12-16T17:56:56,849 DEBUG [Thread-166 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-16T17:56:56,850 DEBUG [Thread-164 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x668d0ebc to 127.0.0.1:49190 2024-12-16T17:56:56,850 DEBUG [Thread-164 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-16T17:56:57,022 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=35.78 KB at sequenceid=551 (bloomFilter=true), to=hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/99318ad6c4e7b8782230d738424ff705/.tmp/C/c876297b34a7434fbc7ecb15800231c2 2024-12-16T17:56:57,029 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/99318ad6c4e7b8782230d738424ff705/.tmp/A/871e6af9a0274026b78bdae4fbeea583 as hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/99318ad6c4e7b8782230d738424ff705/A/871e6af9a0274026b78bdae4fbeea583 2024-12-16T17:56:57,035 INFO [MemStoreFlusher.0 {}] 
regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/99318ad6c4e7b8782230d738424ff705/A/871e6af9a0274026b78bdae4fbeea583, entries=200, sequenceid=551, filesize=14.4 K
2024-12-16T17:56:57,036 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/99318ad6c4e7b8782230d738424ff705/.tmp/B/08244e1936b346be9b07d80e3fe03b03 as hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/99318ad6c4e7b8782230d738424ff705/B/08244e1936b346be9b07d80e3fe03b03
2024-12-16T17:56:57,040 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/99318ad6c4e7b8782230d738424ff705/B/08244e1936b346be9b07d80e3fe03b03, entries=150, sequenceid=551, filesize=12.0 K
2024-12-16T17:56:57,041 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/99318ad6c4e7b8782230d738424ff705/.tmp/C/c876297b34a7434fbc7ecb15800231c2 as hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/99318ad6c4e7b8782230d738424ff705/C/c876297b34a7434fbc7ecb15800231c2
2024-12-16T17:56:57,045 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/99318ad6c4e7b8782230d738424ff705/C/c876297b34a7434fbc7ecb15800231c2, entries=150, sequenceid=551, filesize=12.0 K
2024-12-16T17:56:57,046 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~107.34 KB/109920, heapSize ~281.95 KB/288720, currentSize=93.93 KB/96180 for 99318ad6c4e7b8782230d738424ff705 in 872ms, sequenceid=551, compaction requested=true
2024-12-16T17:56:57,046 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 99318ad6c4e7b8782230d738424ff705:
2024-12-16T17:56:57,046 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 99318ad6c4e7b8782230d738424ff705:A, priority=-2147483648, current under compaction store size is 1
2024-12-16T17:56:57,046 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0
2024-12-16T17:56:57,046 DEBUG [RS:0;3609ad07831c:39733-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking
2024-12-16T17:56:57,046 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 99318ad6c4e7b8782230d738424ff705:B, priority=-2147483648, current under compaction store size is 2
2024-12-16T17:56:57,046 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0
2024-12-16T17:56:57,046 DEBUG [RS:0;3609ad07831c:39733-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking
2024-12-16T17:56:57,046 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 99318ad6c4e7b8782230d738424ff705:C, priority=-2147483648, current under compaction store size is 3
2024-12-16T17:56:57,046 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0
2024-12-16T17:56:57,047 DEBUG [RS:0;3609ad07831c:39733-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 40569 starting at candidate #0 after considering 1 permutations with 1 in ratio
2024-12-16T17:56:57,047 DEBUG [RS:0;3609ad07831c:39733-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 38129 starting at candidate #0 after considering 1 permutations with 1 in ratio
2024-12-16T17:56:57,047 DEBUG [RS:0;3609ad07831c:39733-longCompactions-0 {}] regionserver.HStore(1540): 99318ad6c4e7b8782230d738424ff705/B is initiating minor compaction (all files)
2024-12-16T17:56:57,047 DEBUG [RS:0;3609ad07831c:39733-shortCompactions-0 {}] regionserver.HStore(1540): 99318ad6c4e7b8782230d738424ff705/A is initiating minor compaction (all files)
2024-12-16T17:56:57,047 INFO [RS:0;3609ad07831c:39733-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 99318ad6c4e7b8782230d738424ff705/B in TestAcidGuarantees,,1734371794518.99318ad6c4e7b8782230d738424ff705.
2024-12-16T17:56:57,047 INFO [RS:0;3609ad07831c:39733-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 99318ad6c4e7b8782230d738424ff705/A in TestAcidGuarantees,,1734371794518.99318ad6c4e7b8782230d738424ff705.
2024-12-16T17:56:57,047 INFO [RS:0;3609ad07831c:39733-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/99318ad6c4e7b8782230d738424ff705/A/3c51b307791849649a011744c2138b66, hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/99318ad6c4e7b8782230d738424ff705/A/82277bc615374b70af9ebed53ef103c5, hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/99318ad6c4e7b8782230d738424ff705/A/871e6af9a0274026b78bdae4fbeea583] into tmpdir=hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/99318ad6c4e7b8782230d738424ff705/.tmp, totalSize=39.6 K 2024-12-16T17:56:57,047 INFO [RS:0;3609ad07831c:39733-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/99318ad6c4e7b8782230d738424ff705/B/347a150d1a5944efb7db030c4b607378, hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/99318ad6c4e7b8782230d738424ff705/B/3086dfc92c4146889b152ba4d62e4432, hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/99318ad6c4e7b8782230d738424ff705/B/08244e1936b346be9b07d80e3fe03b03] into tmpdir=hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/99318ad6c4e7b8782230d738424ff705/.tmp, totalSize=37.2 K 2024-12-16T17:56:57,048 DEBUG [RS:0;3609ad07831c:39733-shortCompactions-0 {}] compactions.Compactor(224): Compacting 3c51b307791849649a011744c2138b66, keycount=150, bloomtype=ROW, size=13.2 K, encoding=NONE, compression=NONE, seqNum=511, earliestPutTs=1734371813431 2024-12-16T17:56:57,048 DEBUG [RS:0;3609ad07831c:39733-longCompactions-0 {}] compactions.Compactor(224): Compacting 347a150d1a5944efb7db030c4b607378, keycount=150, bloomtype=ROW, size=13.2 K, encoding=NONE, compression=NONE, seqNum=511, earliestPutTs=1734371813431 2024-12-16T17:56:57,048 DEBUG [RS:0;3609ad07831c:39733-shortCompactions-0 {}] compactions.Compactor(224): Compacting 82277bc615374b70af9ebed53ef103c5, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=529, earliestPutTs=1734371813547 2024-12-16T17:56:57,048 DEBUG [RS:0;3609ad07831c:39733-longCompactions-0 {}] compactions.Compactor(224): Compacting 3086dfc92c4146889b152ba4d62e4432, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=529, earliestPutTs=1734371813547 2024-12-16T17:56:57,048 DEBUG [RS:0;3609ad07831c:39733-shortCompactions-0 {}] compactions.Compactor(224): Compacting 871e6af9a0274026b78bdae4fbeea583, keycount=200, bloomtype=ROW, size=14.4 K, encoding=NONE, compression=NONE, seqNum=551, earliestPutTs=1734371814674 2024-12-16T17:56:57,048 DEBUG [RS:0;3609ad07831c:39733-longCompactions-0 {}] compactions.Compactor(224): Compacting 08244e1936b346be9b07d80e3fe03b03, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=551, earliestPutTs=1734371814675 2024-12-16T17:56:57,054 INFO [RS:0;3609ad07831c:39733-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 99318ad6c4e7b8782230d738424ff705#A#compaction#117 average throughput is unlimited, slept 0 time(s) and total 
slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-12-16T17:56:57,054 INFO [RS:0;3609ad07831c:39733-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 99318ad6c4e7b8782230d738424ff705#B#compaction#118 average throughput is unlimited, slept 0 time(s) and total slept time is 0 ms. 1 active operations remaining, total limit is 50.00 MB/second 2024-12-16T17:56:57,055 DEBUG [RS:0;3609ad07831c:39733-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/99318ad6c4e7b8782230d738424ff705/.tmp/A/0325f26a76384a8ab469221d10150e59 is 50, key is test_row_0/A:col10/1734371814675/Put/seqid=0 2024-12-16T17:56:57,055 DEBUG [RS:0;3609ad07831c:39733-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/99318ad6c4e7b8782230d738424ff705/.tmp/B/09124cbb097744dfbfd7853ac8f89f9a is 50, key is test_row_0/B:col10/1734371814675/Put/seqid=0 2024-12-16T17:56:57,059 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41817 is added to blk_1073741957_1133 (size=13629) 2024-12-16T17:56:57,059 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41817 is added to blk_1073741956_1132 (size=13629) 2024-12-16T17:56:57,322 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39733 {}] regionserver.HRegion(8581): Flush requested on 99318ad6c4e7b8782230d738424ff705 2024-12-16T17:56:57,323 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 99318ad6c4e7b8782230d738424ff705 3/3 column families, dataSize=100.63 KB heapSize=264.42 KB 2024-12-16T17:56:57,323 DEBUG [Thread-155 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x070038fb to 127.0.0.1:49190 2024-12-16T17:56:57,324 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 99318ad6c4e7b8782230d738424ff705, store=A 2024-12-16T17:56:57,324 DEBUG [Thread-155 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-16T17:56:57,324 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-16T17:56:57,324 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 99318ad6c4e7b8782230d738424ff705, store=B 2024-12-16T17:56:57,324 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-16T17:56:57,324 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 99318ad6c4e7b8782230d738424ff705, store=C 2024-12-16T17:56:57,324 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-16T17:56:57,328 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/99318ad6c4e7b8782230d738424ff705/.tmp/A/a5679c36de62474bb0fe55881e1731ca is 50, key is test_row_0/A:col10/1734371816193/Put/seqid=0 2024-12-16T17:56:57,333 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41817 is added to blk_1073741958_1134 (size=12301) 2024-12-16T17:56:57,472 DEBUG 
[RS:0;3609ad07831c:39733-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/99318ad6c4e7b8782230d738424ff705/.tmp/B/09124cbb097744dfbfd7853ac8f89f9a as hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/99318ad6c4e7b8782230d738424ff705/B/09124cbb097744dfbfd7853ac8f89f9a 2024-12-16T17:56:57,472 DEBUG [RS:0;3609ad07831c:39733-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/99318ad6c4e7b8782230d738424ff705/.tmp/A/0325f26a76384a8ab469221d10150e59 as hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/99318ad6c4e7b8782230d738424ff705/A/0325f26a76384a8ab469221d10150e59 2024-12-16T17:56:57,478 INFO [RS:0;3609ad07831c:39733-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 99318ad6c4e7b8782230d738424ff705/A of 99318ad6c4e7b8782230d738424ff705 into 0325f26a76384a8ab469221d10150e59(size=13.3 K), total size for store is 13.3 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-12-16T17:56:57,478 INFO [RS:0;3609ad07831c:39733-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 99318ad6c4e7b8782230d738424ff705/B of 99318ad6c4e7b8782230d738424ff705 into 09124cbb097744dfbfd7853ac8f89f9a(size=13.3 K), total size for store is 13.3 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-12-16T17:56:57,478 DEBUG [RS:0;3609ad07831c:39733-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 99318ad6c4e7b8782230d738424ff705: 2024-12-16T17:56:57,478 DEBUG [RS:0;3609ad07831c:39733-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 99318ad6c4e7b8782230d738424ff705: 2024-12-16T17:56:57,478 INFO [RS:0;3609ad07831c:39733-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1734371794518.99318ad6c4e7b8782230d738424ff705., storeName=99318ad6c4e7b8782230d738424ff705/A, priority=13, startTime=1734371817046; duration=0sec 2024-12-16T17:56:57,478 INFO [RS:0;3609ad07831c:39733-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1734371794518.99318ad6c4e7b8782230d738424ff705., storeName=99318ad6c4e7b8782230d738424ff705/B, priority=13, startTime=1734371817046; duration=0sec 2024-12-16T17:56:57,478 DEBUG [RS:0;3609ad07831c:39733-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-16T17:56:57,478 DEBUG [RS:0;3609ad07831c:39733-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 99318ad6c4e7b8782230d738424ff705:A 2024-12-16T17:56:57,478 DEBUG [RS:0;3609ad07831c:39733-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-16T17:56:57,478 DEBUG [RS:0;3609ad07831c:39733-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 99318ad6c4e7b8782230d738424ff705:B 2024-12-16T17:56:57,478 DEBUG 
[RS:0;3609ad07831c:39733-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-16T17:56:57,479 DEBUG [RS:0;3609ad07831c:39733-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 38129 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-16T17:56:57,479 DEBUG [RS:0;3609ad07831c:39733-shortCompactions-0 {}] regionserver.HStore(1540): 99318ad6c4e7b8782230d738424ff705/C is initiating minor compaction (all files) 2024-12-16T17:56:57,480 INFO [RS:0;3609ad07831c:39733-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 99318ad6c4e7b8782230d738424ff705/C in TestAcidGuarantees,,1734371794518.99318ad6c4e7b8782230d738424ff705. 2024-12-16T17:56:57,480 INFO [RS:0;3609ad07831c:39733-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/99318ad6c4e7b8782230d738424ff705/C/8a57b1f4eb3643de96ef527efc73b5a7, hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/99318ad6c4e7b8782230d738424ff705/C/c08600e8e7aa42b1b63dcff6a65f10f0, hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/99318ad6c4e7b8782230d738424ff705/C/c876297b34a7434fbc7ecb15800231c2] into tmpdir=hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/99318ad6c4e7b8782230d738424ff705/.tmp, totalSize=37.2 K 2024-12-16T17:56:57,480 DEBUG [RS:0;3609ad07831c:39733-shortCompactions-0 {}] compactions.Compactor(224): Compacting 8a57b1f4eb3643de96ef527efc73b5a7, keycount=150, bloomtype=ROW, size=13.2 K, encoding=NONE, compression=NONE, seqNum=511, earliestPutTs=1734371813431 2024-12-16T17:56:57,480 DEBUG [RS:0;3609ad07831c:39733-shortCompactions-0 {}] compactions.Compactor(224): Compacting c08600e8e7aa42b1b63dcff6a65f10f0, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=529, earliestPutTs=1734371813547 2024-12-16T17:56:57,481 DEBUG [RS:0;3609ad07831c:39733-shortCompactions-0 {}] compactions.Compactor(224): Compacting c876297b34a7434fbc7ecb15800231c2, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=551, earliestPutTs=1734371814675 2024-12-16T17:56:57,488 INFO [RS:0;3609ad07831c:39733-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 99318ad6c4e7b8782230d738424ff705#C#compaction#120 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-12-16T17:56:57,489 DEBUG [RS:0;3609ad07831c:39733-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/99318ad6c4e7b8782230d738424ff705/.tmp/C/1fa90e254eb74cae87176a64a4e466b2 is 50, key is test_row_0/C:col10/1734371814675/Put/seqid=0 2024-12-16T17:56:57,492 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41817 is added to blk_1073741959_1135 (size=13629) 2024-12-16T17:56:57,534 DEBUG [FsDatasetAsyncDiskServiceFixer {}] hbase.HBaseTestingUtility$FsDatasetAsyncDiskServiceFixer(620): NoSuchFieldException: threadGroup; It might because your Hadoop version > 3.2.3 or 3.3.4, See HBASE-27595 for details. 2024-12-16T17:56:57,734 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=33.54 KB at sequenceid=569 (bloomFilter=true), to=hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/99318ad6c4e7b8782230d738424ff705/.tmp/A/a5679c36de62474bb0fe55881e1731ca 2024-12-16T17:56:57,740 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38367 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=26 2024-12-16T17:56:57,740 INFO [Thread-159 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 26 completed 2024-12-16T17:56:57,743 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/99318ad6c4e7b8782230d738424ff705/.tmp/B/44811e7e931545919aae4110d6987fd6 is 50, key is test_row_0/B:col10/1734371816193/Put/seqid=0 2024-12-16T17:56:57,746 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41817 is added to blk_1073741960_1136 (size=12301) 2024-12-16T17:56:57,907 DEBUG [RS:0;3609ad07831c:39733-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/99318ad6c4e7b8782230d738424ff705/.tmp/C/1fa90e254eb74cae87176a64a4e466b2 as hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/99318ad6c4e7b8782230d738424ff705/C/1fa90e254eb74cae87176a64a4e466b2 2024-12-16T17:56:57,913 INFO [RS:0;3609ad07831c:39733-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 99318ad6c4e7b8782230d738424ff705/C of 99318ad6c4e7b8782230d738424ff705 into 1fa90e254eb74cae87176a64a4e466b2(size=13.3 K), total size for store is 13.3 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-12-16T17:56:57,913 DEBUG [RS:0;3609ad07831c:39733-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 99318ad6c4e7b8782230d738424ff705: 2024-12-16T17:56:57,913 INFO [RS:0;3609ad07831c:39733-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1734371794518.99318ad6c4e7b8782230d738424ff705., storeName=99318ad6c4e7b8782230d738424ff705/C, priority=13, startTime=1734371817046; duration=0sec 2024-12-16T17:56:57,913 DEBUG [RS:0;3609ad07831c:39733-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-16T17:56:57,913 DEBUG [RS:0;3609ad07831c:39733-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 99318ad6c4e7b8782230d738424ff705:C 2024-12-16T17:56:58,148 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=33.54 KB at sequenceid=569 (bloomFilter=true), to=hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/99318ad6c4e7b8782230d738424ff705/.tmp/B/44811e7e931545919aae4110d6987fd6 2024-12-16T17:56:58,164 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/99318ad6c4e7b8782230d738424ff705/.tmp/C/54b8ce8a56a7428e8a3ff6c7e8067932 is 50, key is test_row_0/C:col10/1734371816193/Put/seqid=0 2024-12-16T17:56:58,169 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41817 is added to blk_1073741961_1137 (size=12301) 2024-12-16T17:56:58,570 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=33.54 KB at sequenceid=569 (bloomFilter=true), to=hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/99318ad6c4e7b8782230d738424ff705/.tmp/C/54b8ce8a56a7428e8a3ff6c7e8067932 2024-12-16T17:56:58,599 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/99318ad6c4e7b8782230d738424ff705/.tmp/A/a5679c36de62474bb0fe55881e1731ca as hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/99318ad6c4e7b8782230d738424ff705/A/a5679c36de62474bb0fe55881e1731ca 2024-12-16T17:56:58,608 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/99318ad6c4e7b8782230d738424ff705/A/a5679c36de62474bb0fe55881e1731ca, entries=150, sequenceid=569, filesize=12.0 K 2024-12-16T17:56:58,610 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/99318ad6c4e7b8782230d738424ff705/.tmp/B/44811e7e931545919aae4110d6987fd6 as hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/99318ad6c4e7b8782230d738424ff705/B/44811e7e931545919aae4110d6987fd6 2024-12-16T17:56:58,616 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added 
hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/99318ad6c4e7b8782230d738424ff705/B/44811e7e931545919aae4110d6987fd6, entries=150, sequenceid=569, filesize=12.0 K 2024-12-16T17:56:58,617 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/99318ad6c4e7b8782230d738424ff705/.tmp/C/54b8ce8a56a7428e8a3ff6c7e8067932 as hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/99318ad6c4e7b8782230d738424ff705/C/54b8ce8a56a7428e8a3ff6c7e8067932 2024-12-16T17:56:58,621 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/99318ad6c4e7b8782230d738424ff705/C/54b8ce8a56a7428e8a3ff6c7e8067932, entries=150, sequenceid=569, filesize=12.0 K 2024-12-16T17:56:58,621 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~100.63 KB/103050, heapSize ~264.38 KB/270720, currentSize=0 B/0 for 99318ad6c4e7b8782230d738424ff705 in 1299ms, sequenceid=569, compaction requested=false 2024-12-16T17:56:58,622 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 99318ad6c4e7b8782230d738424ff705: 2024-12-16T17:56:58,801 DEBUG [Thread-153 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x09f6c377 to 127.0.0.1:49190 2024-12-16T17:56:58,802 DEBUG [Thread-153 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-16T17:56:58,814 DEBUG [Thread-151 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x111a5244 to 127.0.0.1:49190 2024-12-16T17:56:58,814 DEBUG [Thread-151 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-16T17:56:58,814 DEBUG [Thread-157 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x1c1b9a1b to 127.0.0.1:49190 2024-12-16T17:56:58,814 DEBUG [Thread-157 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-16T17:56:58,823 DEBUG [Thread-149 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x6d5f32f0 to 127.0.0.1:49190 2024-12-16T17:56:58,823 DEBUG [Thread-149 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-16T17:56:58,824 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(392): Finished test. 
Writers:
2024-12-16T17:56:58,824 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(394): wrote 94
2024-12-16T17:56:58,824 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(394): wrote 90
2024-12-16T17:56:58,824 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(394): wrote 89
2024-12-16T17:56:58,824 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(394): wrote 82
2024-12-16T17:56:58,824 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(394): wrote 97
2024-12-16T17:56:58,824 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(396): Readers:
2024-12-16T17:56:58,824 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(398): read 7174
2024-12-16T17:56:58,824 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(398): read 7122
2024-12-16T17:56:58,825 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(400): Scanners:
2024-12-16T17:56:58,825 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(402): scanned 3154
2024-12-16T17:56:58,825 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(403): verified 9458 rows
2024-12-16T17:56:58,825 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(402): scanned 3168
2024-12-16T17:56:58,825 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(403): verified 9497 rows
2024-12-16T17:56:58,825 INFO [Time-limited test {}] client.ConnectionImplementation(2127): Closing master protocol: MasterService
2024-12-16T17:56:58,825 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x2f7b70c9 to 127.0.0.1:49190
2024-12-16T17:56:58,825 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client
2024-12-16T17:56:58,829 INFO [Time-limited test {}] client.HBaseAdmin$18(967): Started disable of TestAcidGuarantees
2024-12-16T17:56:58,833 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38367 {}] master.HMaster$13(2755): Client=jenkins//172.17.0.2 disable TestAcidGuarantees
2024-12-16T17:56:58,838 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38367 {}] procedure2.ProcedureExecutor(1098): Stored pid=28, state=RUNNABLE:DISABLE_TABLE_PREPARE; DisableTableProcedure table=TestAcidGuarantees
2024-12-16T17:56:58,840 DEBUG [PEWorker-2 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":1,"row":"TestAcidGuarantees","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1734371818840"}]},"ts":"1734371818840"}
2024-12-16T17:56:58,840 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38367 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=28
2024-12-16T17:56:58,841 INFO [PEWorker-2 {}] hbase.MetaTableAccessor(1655): Updated tableName=TestAcidGuarantees, state=DISABLING in hbase:meta
2024-12-16T17:56:58,849 INFO [PEWorker-2 {}] procedure.DisableTableProcedure(284): Set TestAcidGuarantees to state=DISABLING
2024-12-16T17:56:58,851 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=29, ppid=28, state=RUNNABLE:CLOSE_TABLE_REGIONS_SCHEDULE; CloseTableRegionsProcedure table=TestAcidGuarantees}]
2024-12-16T17:56:58,855 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=30, ppid=29, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE; TransitRegionStateProcedure table=TestAcidGuarantees, region=99318ad6c4e7b8782230d738424ff705, UNASSIGN}]
2024-12-16T17:56:58,855 INFO [PEWorker-5 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=30, ppid=29, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE; TransitRegionStateProcedure table=TestAcidGuarantees, region=99318ad6c4e7b8782230d738424ff705, UNASSIGN
2024-12-16T17:56:58,856 INFO [PEWorker-5 {}] assignment.RegionStateStore(202): pid=30 updating hbase:meta row=99318ad6c4e7b8782230d738424ff705, regionState=CLOSING, regionLocation=3609ad07831c,39733,1734371789085
2024-12-16T17:56:58,857 DEBUG [PEWorker-5 {}] assignment.TransitRegionStateProcedure(338): Close region: isSplit: false: evictOnSplit: true: evictOnClose: false
2024-12-16T17:56:58,857 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=31, ppid=30, state=RUNNABLE; CloseRegionProcedure 99318ad6c4e7b8782230d738424ff705, server=3609ad07831c,39733,1734371789085}]
2024-12-16T17:56:58,942 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38367 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=28
2024-12-16T17:56:59,013 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 3609ad07831c,39733,1734371789085
2024-12-16T17:56:59,015 INFO [RS_CLOSE_REGION-regionserver/3609ad07831c:0-0 {event_type=M_RS_CLOSE_REGION, pid=31}] handler.UnassignRegionHandler(124): Close 99318ad6c4e7b8782230d738424ff705
2024-12-16T17:56:59,015 DEBUG [RS_CLOSE_REGION-regionserver/3609ad07831c:0-0 {event_type=M_RS_CLOSE_REGION, pid=31}] handler.UnassignRegionHandler(138): Unassign region: split region: false: evictCache: false
2024-12-16T17:56:59,016 DEBUG [RS_CLOSE_REGION-regionserver/3609ad07831c:0-0 {event_type=M_RS_CLOSE_REGION, pid=31}] regionserver.HRegion(1681): Closing 99318ad6c4e7b8782230d738424ff705, disabling compactions & flushes
2024-12-16T17:56:59,016 INFO [RS_CLOSE_REGION-regionserver/3609ad07831c:0-0 {event_type=M_RS_CLOSE_REGION, pid=31}] regionserver.HRegion(1703): Closing region TestAcidGuarantees,,1734371794518.99318ad6c4e7b8782230d738424ff705.
2024-12-16T17:56:59,016 DEBUG [RS_CLOSE_REGION-regionserver/3609ad07831c:0-0 {event_type=M_RS_CLOSE_REGION, pid=31}] regionserver.HRegion(1724): Waiting without time limit for close lock on TestAcidGuarantees,,1734371794518.99318ad6c4e7b8782230d738424ff705.
2024-12-16T17:56:59,016 DEBUG [RS_CLOSE_REGION-regionserver/3609ad07831c:0-0 {event_type=M_RS_CLOSE_REGION, pid=31}] regionserver.HRegion(1791): Acquired close lock on TestAcidGuarantees,,1734371794518.99318ad6c4e7b8782230d738424ff705. after waiting 0 ms
2024-12-16T17:56:59,016 DEBUG [RS_CLOSE_REGION-regionserver/3609ad07831c:0-0 {event_type=M_RS_CLOSE_REGION, pid=31}] regionserver.HRegion(1801): Updates disabled for region TestAcidGuarantees,,1734371794518.99318ad6c4e7b8782230d738424ff705.
2024-12-16T17:56:59,017 INFO [RS_CLOSE_REGION-regionserver/3609ad07831c:0-0 {event_type=M_RS_CLOSE_REGION, pid=31}] regionserver.HRegion(2837): Flushing 99318ad6c4e7b8782230d738424ff705 3/3 column families, dataSize=26.84 KB heapSize=71.06 KB 2024-12-16T17:56:59,017 DEBUG [RS_CLOSE_REGION-regionserver/3609ad07831c:0-0 {event_type=M_RS_CLOSE_REGION, pid=31}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 99318ad6c4e7b8782230d738424ff705, store=A 2024-12-16T17:56:59,017 DEBUG [RS_CLOSE_REGION-regionserver/3609ad07831c:0-0 {event_type=M_RS_CLOSE_REGION, pid=31}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-16T17:56:59,017 DEBUG [RS_CLOSE_REGION-regionserver/3609ad07831c:0-0 {event_type=M_RS_CLOSE_REGION, pid=31}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 99318ad6c4e7b8782230d738424ff705, store=B 2024-12-16T17:56:59,017 DEBUG [RS_CLOSE_REGION-regionserver/3609ad07831c:0-0 {event_type=M_RS_CLOSE_REGION, pid=31}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-16T17:56:59,017 DEBUG [RS_CLOSE_REGION-regionserver/3609ad07831c:0-0 {event_type=M_RS_CLOSE_REGION, pid=31}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 99318ad6c4e7b8782230d738424ff705, store=C 2024-12-16T17:56:59,017 DEBUG [RS_CLOSE_REGION-regionserver/3609ad07831c:0-0 {event_type=M_RS_CLOSE_REGION, pid=31}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-16T17:56:59,023 DEBUG [RS_CLOSE_REGION-regionserver/3609ad07831c:0-0 {event_type=M_RS_CLOSE_REGION, pid=31}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/99318ad6c4e7b8782230d738424ff705/.tmp/A/905722f361254254b6ed15f0ef7579b6 is 50, key is test_row_0/A:col10/1734371818812/Put/seqid=0 2024-12-16T17:56:59,028 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41817 is added to blk_1073741962_1138 (size=9857) 2024-12-16T17:56:59,144 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38367 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=28 2024-12-16T17:56:59,430 INFO [RS_CLOSE_REGION-regionserver/3609ad07831c:0-0 {event_type=M_RS_CLOSE_REGION, pid=31}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=8.95 KB at sequenceid=579 (bloomFilter=true), to=hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/99318ad6c4e7b8782230d738424ff705/.tmp/A/905722f361254254b6ed15f0ef7579b6 2024-12-16T17:56:59,445 DEBUG [RS_CLOSE_REGION-regionserver/3609ad07831c:0-0 {event_type=M_RS_CLOSE_REGION, pid=31}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/99318ad6c4e7b8782230d738424ff705/.tmp/B/28d1d7a9ded64322a709fbad329e0bdd is 50, key is test_row_0/B:col10/1734371818812/Put/seqid=0 2024-12-16T17:56:59,446 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38367 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=28 2024-12-16T17:56:59,449 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41817 is added to blk_1073741963_1139 (size=9857) 2024-12-16T17:56:59,851 INFO [RS_CLOSE_REGION-regionserver/3609ad07831c:0-0 
{event_type=M_RS_CLOSE_REGION, pid=31}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=8.95 KB at sequenceid=579 (bloomFilter=true), to=hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/99318ad6c4e7b8782230d738424ff705/.tmp/B/28d1d7a9ded64322a709fbad329e0bdd 2024-12-16T17:56:59,864 DEBUG [RS_CLOSE_REGION-regionserver/3609ad07831c:0-0 {event_type=M_RS_CLOSE_REGION, pid=31}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/99318ad6c4e7b8782230d738424ff705/.tmp/C/0f2df3931fda4bdcae88f4ac3565e3d7 is 50, key is test_row_0/C:col10/1734371818812/Put/seqid=0 2024-12-16T17:56:59,869 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41817 is added to blk_1073741964_1140 (size=9857) 2024-12-16T17:56:59,948 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38367 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=28 2024-12-16T17:57:00,271 INFO [RS_CLOSE_REGION-regionserver/3609ad07831c:0-0 {event_type=M_RS_CLOSE_REGION, pid=31}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=8.95 KB at sequenceid=579 (bloomFilter=true), to=hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/99318ad6c4e7b8782230d738424ff705/.tmp/C/0f2df3931fda4bdcae88f4ac3565e3d7 2024-12-16T17:57:00,283 DEBUG [RS_CLOSE_REGION-regionserver/3609ad07831c:0-0 {event_type=M_RS_CLOSE_REGION, pid=31}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/99318ad6c4e7b8782230d738424ff705/.tmp/A/905722f361254254b6ed15f0ef7579b6 as hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/99318ad6c4e7b8782230d738424ff705/A/905722f361254254b6ed15f0ef7579b6 2024-12-16T17:57:00,290 INFO [RS_CLOSE_REGION-regionserver/3609ad07831c:0-0 {event_type=M_RS_CLOSE_REGION, pid=31}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/99318ad6c4e7b8782230d738424ff705/A/905722f361254254b6ed15f0ef7579b6, entries=100, sequenceid=579, filesize=9.6 K 2024-12-16T17:57:00,291 DEBUG [RS_CLOSE_REGION-regionserver/3609ad07831c:0-0 {event_type=M_RS_CLOSE_REGION, pid=31}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/99318ad6c4e7b8782230d738424ff705/.tmp/B/28d1d7a9ded64322a709fbad329e0bdd as hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/99318ad6c4e7b8782230d738424ff705/B/28d1d7a9ded64322a709fbad329e0bdd 2024-12-16T17:57:00,296 INFO [RS_CLOSE_REGION-regionserver/3609ad07831c:0-0 {event_type=M_RS_CLOSE_REGION, pid=31}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/99318ad6c4e7b8782230d738424ff705/B/28d1d7a9ded64322a709fbad329e0bdd, entries=100, sequenceid=579, filesize=9.6 K 2024-12-16T17:57:00,297 DEBUG [RS_CLOSE_REGION-regionserver/3609ad07831c:0-0 {event_type=M_RS_CLOSE_REGION, pid=31}] regionserver.HRegionFileSystem(442): Committing 
hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/99318ad6c4e7b8782230d738424ff705/.tmp/C/0f2df3931fda4bdcae88f4ac3565e3d7 as hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/99318ad6c4e7b8782230d738424ff705/C/0f2df3931fda4bdcae88f4ac3565e3d7 2024-12-16T17:57:00,301 INFO [RS_CLOSE_REGION-regionserver/3609ad07831c:0-0 {event_type=M_RS_CLOSE_REGION, pid=31}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/99318ad6c4e7b8782230d738424ff705/C/0f2df3931fda4bdcae88f4ac3565e3d7, entries=100, sequenceid=579, filesize=9.6 K 2024-12-16T17:57:00,302 INFO [RS_CLOSE_REGION-regionserver/3609ad07831c:0-0 {event_type=M_RS_CLOSE_REGION, pid=31}] regionserver.HRegion(3040): Finished flush of dataSize ~26.84 KB/27480, heapSize ~71.02 KB/72720, currentSize=0 B/0 for 99318ad6c4e7b8782230d738424ff705 in 1286ms, sequenceid=579, compaction requested=true 2024-12-16T17:57:00,303 DEBUG [StoreCloser-TestAcidGuarantees,,1734371794518.99318ad6c4e7b8782230d738424ff705.-1 {}] regionserver.HStore(2316): Moving the files [hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/99318ad6c4e7b8782230d738424ff705/A/27e28eaf7dfc4e9a848283467fc94f93, hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/99318ad6c4e7b8782230d738424ff705/A/ec695928b4634e2e94c6f1c27cfe61be, hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/99318ad6c4e7b8782230d738424ff705/A/737f0e1420f0408594f1fce94c38008e, hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/99318ad6c4e7b8782230d738424ff705/A/088927053dee47739704dacde7207c95, hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/99318ad6c4e7b8782230d738424ff705/A/52be8eb4aef0458b9c56944ba033f8f7, hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/99318ad6c4e7b8782230d738424ff705/A/48ad14bc92fe4d76800e031cc7bd4e3a, hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/99318ad6c4e7b8782230d738424ff705/A/540d85c4a8504848a354eea6445834f2, hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/99318ad6c4e7b8782230d738424ff705/A/d0b4ee1360ac4a698c6bdc3535dcdd36, hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/99318ad6c4e7b8782230d738424ff705/A/701acd63c17c4387bcee377689bbf751, hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/99318ad6c4e7b8782230d738424ff705/A/d8a6dda673a1421d8ee759326067306f, hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/99318ad6c4e7b8782230d738424ff705/A/3b48c4f995724275ba6d079305393ec2, hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/99318ad6c4e7b8782230d738424ff705/A/b851fb47b6bb4be9ac4c137c20c14b85, 
hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/99318ad6c4e7b8782230d738424ff705/A/c67b06fb67a946a4a8400d1ff456f636, hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/99318ad6c4e7b8782230d738424ff705/A/dd7976d569124dd7a7746eaa362becb7, hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/99318ad6c4e7b8782230d738424ff705/A/1b99178b095441679aa4d2c74989a155, hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/99318ad6c4e7b8782230d738424ff705/A/18639d61e1b94d1a96ffaa942f004997, hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/99318ad6c4e7b8782230d738424ff705/A/10aadf957f554aab979bd3096f233e91, hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/99318ad6c4e7b8782230d738424ff705/A/eda0a2fdec314a52a2ede1db1dfa8e79, hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/99318ad6c4e7b8782230d738424ff705/A/f1e3608f5a44478f85e6297ea90bec0c, hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/99318ad6c4e7b8782230d738424ff705/A/98c10f93cc0e4e018d39cd97f9db94ee, hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/99318ad6c4e7b8782230d738424ff705/A/1c434a0d317a4709a57b7e2a11f417de, hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/99318ad6c4e7b8782230d738424ff705/A/b41bb71d1fe5424ca7b4d887255411ea, hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/99318ad6c4e7b8782230d738424ff705/A/df27eec3e2204212b9dba0d5128a7135, hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/99318ad6c4e7b8782230d738424ff705/A/4c85751af8444d56b854664297602353, hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/99318ad6c4e7b8782230d738424ff705/A/fada509f355c4ffab32353b70fd9a1e8, hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/99318ad6c4e7b8782230d738424ff705/A/5c98f35a9cad47ca910cffb8b6b32ed3, hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/99318ad6c4e7b8782230d738424ff705/A/be05da72a17e4ff786a5e04e748077e2, hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/99318ad6c4e7b8782230d738424ff705/A/8d17e87f224e4a4092f9ae0e22bc5d28, hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/99318ad6c4e7b8782230d738424ff705/A/a3ff45c3cd0c4e189b52ac603787ea6b, hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/99318ad6c4e7b8782230d738424ff705/A/08312614169549859a5fb176611e45fe, hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/99318ad6c4e7b8782230d738424ff705/A/c44339bde36e4283bf4fe8011da61080, 
hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/99318ad6c4e7b8782230d738424ff705/A/55b0ce89a0234c42b11cf0d804195104, hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/99318ad6c4e7b8782230d738424ff705/A/e776fdca60a642bd9131d15472b7b62b, hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/99318ad6c4e7b8782230d738424ff705/A/22305535de4a4ef6b222eed3210ae23d, hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/99318ad6c4e7b8782230d738424ff705/A/d8a56d19dc2445e18367ab97dc2ee39d, hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/99318ad6c4e7b8782230d738424ff705/A/3c51b307791849649a011744c2138b66, hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/99318ad6c4e7b8782230d738424ff705/A/a65dc017545b49239dd9f5d5d36a4d7c, hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/99318ad6c4e7b8782230d738424ff705/A/82277bc615374b70af9ebed53ef103c5, hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/99318ad6c4e7b8782230d738424ff705/A/871e6af9a0274026b78bdae4fbeea583] to archive 2024-12-16T17:57:00,306 DEBUG [StoreCloser-TestAcidGuarantees,,1734371794518.99318ad6c4e7b8782230d738424ff705.-1 {}] backup.HFileArchiver(363): Archiving compacted files. 2024-12-16T17:57:00,312 DEBUG [HFileArchiver-1 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/99318ad6c4e7b8782230d738424ff705/A/27e28eaf7dfc4e9a848283467fc94f93 to hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/archive/data/default/TestAcidGuarantees/99318ad6c4e7b8782230d738424ff705/A/27e28eaf7dfc4e9a848283467fc94f93 2024-12-16T17:57:00,313 DEBUG [HFileArchiver-2 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/99318ad6c4e7b8782230d738424ff705/A/ec695928b4634e2e94c6f1c27cfe61be to hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/archive/data/default/TestAcidGuarantees/99318ad6c4e7b8782230d738424ff705/A/ec695928b4634e2e94c6f1c27cfe61be 2024-12-16T17:57:00,313 DEBUG [HFileArchiver-3 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/99318ad6c4e7b8782230d738424ff705/A/737f0e1420f0408594f1fce94c38008e to hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/archive/data/default/TestAcidGuarantees/99318ad6c4e7b8782230d738424ff705/A/737f0e1420f0408594f1fce94c38008e 2024-12-16T17:57:00,313 DEBUG [HFileArchiver-6 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/99318ad6c4e7b8782230d738424ff705/A/48ad14bc92fe4d76800e031cc7bd4e3a to 
hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/archive/data/default/TestAcidGuarantees/99318ad6c4e7b8782230d738424ff705/A/48ad14bc92fe4d76800e031cc7bd4e3a 2024-12-16T17:57:00,313 DEBUG [HFileArchiver-7 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/99318ad6c4e7b8782230d738424ff705/A/540d85c4a8504848a354eea6445834f2 to hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/archive/data/default/TestAcidGuarantees/99318ad6c4e7b8782230d738424ff705/A/540d85c4a8504848a354eea6445834f2 2024-12-16T17:57:00,313 DEBUG [HFileArchiver-8 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/99318ad6c4e7b8782230d738424ff705/A/d0b4ee1360ac4a698c6bdc3535dcdd36 to hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/archive/data/default/TestAcidGuarantees/99318ad6c4e7b8782230d738424ff705/A/d0b4ee1360ac4a698c6bdc3535dcdd36 2024-12-16T17:57:00,313 DEBUG [HFileArchiver-5 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/99318ad6c4e7b8782230d738424ff705/A/52be8eb4aef0458b9c56944ba033f8f7 to hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/archive/data/default/TestAcidGuarantees/99318ad6c4e7b8782230d738424ff705/A/52be8eb4aef0458b9c56944ba033f8f7 2024-12-16T17:57:00,313 DEBUG [HFileArchiver-4 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/99318ad6c4e7b8782230d738424ff705/A/088927053dee47739704dacde7207c95 to hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/archive/data/default/TestAcidGuarantees/99318ad6c4e7b8782230d738424ff705/A/088927053dee47739704dacde7207c95 2024-12-16T17:57:00,315 DEBUG [HFileArchiver-2 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/99318ad6c4e7b8782230d738424ff705/A/701acd63c17c4387bcee377689bbf751 to hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/archive/data/default/TestAcidGuarantees/99318ad6c4e7b8782230d738424ff705/A/701acd63c17c4387bcee377689bbf751 2024-12-16T17:57:00,315 DEBUG [HFileArchiver-4 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/99318ad6c4e7b8782230d738424ff705/A/18639d61e1b94d1a96ffaa942f004997 to hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/archive/data/default/TestAcidGuarantees/99318ad6c4e7b8782230d738424ff705/A/18639d61e1b94d1a96ffaa942f004997 2024-12-16T17:57:00,315 DEBUG [HFileArchiver-1 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/99318ad6c4e7b8782230d738424ff705/A/d8a6dda673a1421d8ee759326067306f to 
hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/archive/data/default/TestAcidGuarantees/99318ad6c4e7b8782230d738424ff705/A/d8a6dda673a1421d8ee759326067306f 2024-12-16T17:57:00,315 DEBUG [HFileArchiver-8 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/99318ad6c4e7b8782230d738424ff705/A/dd7976d569124dd7a7746eaa362becb7 to hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/archive/data/default/TestAcidGuarantees/99318ad6c4e7b8782230d738424ff705/A/dd7976d569124dd7a7746eaa362becb7 2024-12-16T17:57:00,316 DEBUG [HFileArchiver-7 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/99318ad6c4e7b8782230d738424ff705/A/c67b06fb67a946a4a8400d1ff456f636 to hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/archive/data/default/TestAcidGuarantees/99318ad6c4e7b8782230d738424ff705/A/c67b06fb67a946a4a8400d1ff456f636 2024-12-16T17:57:00,316 DEBUG [HFileArchiver-5 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/99318ad6c4e7b8782230d738424ff705/A/1b99178b095441679aa4d2c74989a155 to hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/archive/data/default/TestAcidGuarantees/99318ad6c4e7b8782230d738424ff705/A/1b99178b095441679aa4d2c74989a155 2024-12-16T17:57:00,316 DEBUG [HFileArchiver-3 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/99318ad6c4e7b8782230d738424ff705/A/3b48c4f995724275ba6d079305393ec2 to hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/archive/data/default/TestAcidGuarantees/99318ad6c4e7b8782230d738424ff705/A/3b48c4f995724275ba6d079305393ec2 2024-12-16T17:57:00,317 DEBUG [HFileArchiver-6 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/99318ad6c4e7b8782230d738424ff705/A/b851fb47b6bb4be9ac4c137c20c14b85 to hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/archive/data/default/TestAcidGuarantees/99318ad6c4e7b8782230d738424ff705/A/b851fb47b6bb4be9ac4c137c20c14b85 2024-12-16T17:57:00,317 DEBUG [HFileArchiver-2 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/99318ad6c4e7b8782230d738424ff705/A/10aadf957f554aab979bd3096f233e91 to hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/archive/data/default/TestAcidGuarantees/99318ad6c4e7b8782230d738424ff705/A/10aadf957f554aab979bd3096f233e91 2024-12-16T17:57:00,318 DEBUG [HFileArchiver-4 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/99318ad6c4e7b8782230d738424ff705/A/eda0a2fdec314a52a2ede1db1dfa8e79 to 
hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/archive/data/default/TestAcidGuarantees/99318ad6c4e7b8782230d738424ff705/A/eda0a2fdec314a52a2ede1db1dfa8e79 2024-12-16T17:57:00,318 DEBUG [HFileArchiver-7 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/99318ad6c4e7b8782230d738424ff705/A/1c434a0d317a4709a57b7e2a11f417de to hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/archive/data/default/TestAcidGuarantees/99318ad6c4e7b8782230d738424ff705/A/1c434a0d317a4709a57b7e2a11f417de 2024-12-16T17:57:00,318 DEBUG [HFileArchiver-8 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/99318ad6c4e7b8782230d738424ff705/A/98c10f93cc0e4e018d39cd97f9db94ee to hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/archive/data/default/TestAcidGuarantees/99318ad6c4e7b8782230d738424ff705/A/98c10f93cc0e4e018d39cd97f9db94ee 2024-12-16T17:57:00,318 DEBUG [HFileArchiver-1 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/99318ad6c4e7b8782230d738424ff705/A/f1e3608f5a44478f85e6297ea90bec0c to hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/archive/data/default/TestAcidGuarantees/99318ad6c4e7b8782230d738424ff705/A/f1e3608f5a44478f85e6297ea90bec0c 2024-12-16T17:57:00,318 DEBUG [HFileArchiver-5 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/99318ad6c4e7b8782230d738424ff705/A/b41bb71d1fe5424ca7b4d887255411ea to hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/archive/data/default/TestAcidGuarantees/99318ad6c4e7b8782230d738424ff705/A/b41bb71d1fe5424ca7b4d887255411ea 2024-12-16T17:57:00,319 DEBUG [HFileArchiver-3 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/99318ad6c4e7b8782230d738424ff705/A/df27eec3e2204212b9dba0d5128a7135 to hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/archive/data/default/TestAcidGuarantees/99318ad6c4e7b8782230d738424ff705/A/df27eec3e2204212b9dba0d5128a7135 2024-12-16T17:57:00,319 DEBUG [HFileArchiver-6 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/99318ad6c4e7b8782230d738424ff705/A/4c85751af8444d56b854664297602353 to hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/archive/data/default/TestAcidGuarantees/99318ad6c4e7b8782230d738424ff705/A/4c85751af8444d56b854664297602353 2024-12-16T17:57:00,319 DEBUG [HFileArchiver-4 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/99318ad6c4e7b8782230d738424ff705/A/5c98f35a9cad47ca910cffb8b6b32ed3 to 
hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/archive/data/default/TestAcidGuarantees/99318ad6c4e7b8782230d738424ff705/A/5c98f35a9cad47ca910cffb8b6b32ed3 2024-12-16T17:57:00,319 DEBUG [HFileArchiver-2 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/99318ad6c4e7b8782230d738424ff705/A/fada509f355c4ffab32353b70fd9a1e8 to hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/archive/data/default/TestAcidGuarantees/99318ad6c4e7b8782230d738424ff705/A/fada509f355c4ffab32353b70fd9a1e8 2024-12-16T17:57:00,320 DEBUG [HFileArchiver-1 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/99318ad6c4e7b8782230d738424ff705/A/a3ff45c3cd0c4e189b52ac603787ea6b to hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/archive/data/default/TestAcidGuarantees/99318ad6c4e7b8782230d738424ff705/A/a3ff45c3cd0c4e189b52ac603787ea6b 2024-12-16T17:57:00,320 DEBUG [HFileArchiver-7 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/99318ad6c4e7b8782230d738424ff705/A/be05da72a17e4ff786a5e04e748077e2 to hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/archive/data/default/TestAcidGuarantees/99318ad6c4e7b8782230d738424ff705/A/be05da72a17e4ff786a5e04e748077e2 2024-12-16T17:57:00,321 DEBUG [HFileArchiver-5 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/99318ad6c4e7b8782230d738424ff705/A/08312614169549859a5fb176611e45fe to hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/archive/data/default/TestAcidGuarantees/99318ad6c4e7b8782230d738424ff705/A/08312614169549859a5fb176611e45fe 2024-12-16T17:57:00,321 DEBUG [HFileArchiver-6 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/99318ad6c4e7b8782230d738424ff705/A/55b0ce89a0234c42b11cf0d804195104 to hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/archive/data/default/TestAcidGuarantees/99318ad6c4e7b8782230d738424ff705/A/55b0ce89a0234c42b11cf0d804195104 2024-12-16T17:57:00,321 DEBUG [HFileArchiver-8 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/99318ad6c4e7b8782230d738424ff705/A/8d17e87f224e4a4092f9ae0e22bc5d28 to hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/archive/data/default/TestAcidGuarantees/99318ad6c4e7b8782230d738424ff705/A/8d17e87f224e4a4092f9ae0e22bc5d28 2024-12-16T17:57:00,321 DEBUG [HFileArchiver-3 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/99318ad6c4e7b8782230d738424ff705/A/c44339bde36e4283bf4fe8011da61080 to 
hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/archive/data/default/TestAcidGuarantees/99318ad6c4e7b8782230d738424ff705/A/c44339bde36e4283bf4fe8011da61080 2024-12-16T17:57:00,321 DEBUG [HFileArchiver-2 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/99318ad6c4e7b8782230d738424ff705/A/22305535de4a4ef6b222eed3210ae23d to hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/archive/data/default/TestAcidGuarantees/99318ad6c4e7b8782230d738424ff705/A/22305535de4a4ef6b222eed3210ae23d 2024-12-16T17:57:00,322 DEBUG [HFileArchiver-4 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/99318ad6c4e7b8782230d738424ff705/A/e776fdca60a642bd9131d15472b7b62b to hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/archive/data/default/TestAcidGuarantees/99318ad6c4e7b8782230d738424ff705/A/e776fdca60a642bd9131d15472b7b62b 2024-12-16T17:57:00,322 DEBUG [HFileArchiver-1 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/99318ad6c4e7b8782230d738424ff705/A/d8a56d19dc2445e18367ab97dc2ee39d to hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/archive/data/default/TestAcidGuarantees/99318ad6c4e7b8782230d738424ff705/A/d8a56d19dc2445e18367ab97dc2ee39d 2024-12-16T17:57:00,322 DEBUG [HFileArchiver-7 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/99318ad6c4e7b8782230d738424ff705/A/3c51b307791849649a011744c2138b66 to hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/archive/data/default/TestAcidGuarantees/99318ad6c4e7b8782230d738424ff705/A/3c51b307791849649a011744c2138b66 2024-12-16T17:57:00,322 DEBUG [HFileArchiver-5 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/99318ad6c4e7b8782230d738424ff705/A/a65dc017545b49239dd9f5d5d36a4d7c to hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/archive/data/default/TestAcidGuarantees/99318ad6c4e7b8782230d738424ff705/A/a65dc017545b49239dd9f5d5d36a4d7c 2024-12-16T17:57:00,322 DEBUG [HFileArchiver-8 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/99318ad6c4e7b8782230d738424ff705/A/871e6af9a0274026b78bdae4fbeea583 to hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/archive/data/default/TestAcidGuarantees/99318ad6c4e7b8782230d738424ff705/A/871e6af9a0274026b78bdae4fbeea583 2024-12-16T17:57:00,322 DEBUG [HFileArchiver-6 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/99318ad6c4e7b8782230d738424ff705/A/82277bc615374b70af9ebed53ef103c5 to 
hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/archive/data/default/TestAcidGuarantees/99318ad6c4e7b8782230d738424ff705/A/82277bc615374b70af9ebed53ef103c5 2024-12-16T17:57:00,335 DEBUG [StoreCloser-TestAcidGuarantees,,1734371794518.99318ad6c4e7b8782230d738424ff705.-1 {}] regionserver.HStore(2316): Moving the files [hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/99318ad6c4e7b8782230d738424ff705/B/101adadc71214d0d82a7842965784165, hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/99318ad6c4e7b8782230d738424ff705/B/daede70eb63045e6a7abb121943d3e21, hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/99318ad6c4e7b8782230d738424ff705/B/5644b2037651494ab24f116fdd99d0cc, hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/99318ad6c4e7b8782230d738424ff705/B/43907416581c4567bfdf14e9de60b53c, hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/99318ad6c4e7b8782230d738424ff705/B/08d4b8ae54014556a2449b481e7d4e8f, hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/99318ad6c4e7b8782230d738424ff705/B/87ad12d6fa474caa9e3cb9fb28257fb6, hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/99318ad6c4e7b8782230d738424ff705/B/733a372abe194589853feda1be9c6525, hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/99318ad6c4e7b8782230d738424ff705/B/13c44e4d2dc242dc9b01ea664127fc43, hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/99318ad6c4e7b8782230d738424ff705/B/54defcbe352d42c08317b4f3b2a063e2, hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/99318ad6c4e7b8782230d738424ff705/B/4c4d4d89e1d04455b7fd11b687c00e2f, hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/99318ad6c4e7b8782230d738424ff705/B/9d6a6d67921549e4bb83bb0cd5565eae, hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/99318ad6c4e7b8782230d738424ff705/B/7c7649317a6042bb8756e3e3a67b47f9, hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/99318ad6c4e7b8782230d738424ff705/B/e384afab16b34fd3b855efd42db768d8, hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/99318ad6c4e7b8782230d738424ff705/B/22fcf8a783dc4409a73b1dc7f8aa7213, hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/99318ad6c4e7b8782230d738424ff705/B/7f6b1ee7cace436b9ffa9007d42e850e, hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/99318ad6c4e7b8782230d738424ff705/B/311e93ab16554161bc98b6d49e813c60, hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/99318ad6c4e7b8782230d738424ff705/B/edab2f3dc1a14829aed8187ebdc18bb8, 
hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/99318ad6c4e7b8782230d738424ff705/B/ecd1dc900f95461fa2e7f9e963aa4a40, hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/99318ad6c4e7b8782230d738424ff705/B/f6470d1101c54d378c62bdd31cf29f9f, hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/99318ad6c4e7b8782230d738424ff705/B/fc315482f6ec492599ec159804df2a6f, hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/99318ad6c4e7b8782230d738424ff705/B/36e315ee0cbe4cf58e10817126dcd534, hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/99318ad6c4e7b8782230d738424ff705/B/05f444bf98ee420fa04c7adf211ea2e0, hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/99318ad6c4e7b8782230d738424ff705/B/7886f7480dc14798b6635992b16822dc, hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/99318ad6c4e7b8782230d738424ff705/B/5e1f7ad5baab4824bfb9337b1e32dcce, hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/99318ad6c4e7b8782230d738424ff705/B/296d2921ccbc4667ab2fdb1b8eba5cf7, hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/99318ad6c4e7b8782230d738424ff705/B/3dffe07d19304867a19791a7d9183b1d, hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/99318ad6c4e7b8782230d738424ff705/B/a4be1aa592074b17ae8f88edcde284ed, hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/99318ad6c4e7b8782230d738424ff705/B/0ebd3c1330c94aef958895f152d669b5, hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/99318ad6c4e7b8782230d738424ff705/B/3dcbace6c66f4bcd8e6088f429ee302b, hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/99318ad6c4e7b8782230d738424ff705/B/184ce2bb40ec45c9af9249134b720dbd, hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/99318ad6c4e7b8782230d738424ff705/B/bc9b525da1334cd08017fb4b69e7c90b, hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/99318ad6c4e7b8782230d738424ff705/B/9ce6a910262e40e89f4a6a64220bfaf4, hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/99318ad6c4e7b8782230d738424ff705/B/7816c4a6778549efaa240ff5c6249d38, hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/99318ad6c4e7b8782230d738424ff705/B/cc17a241680b4c68a828deac73e09660, hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/99318ad6c4e7b8782230d738424ff705/B/e0ee819ee321472cbcb8781e556b24b3, hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/99318ad6c4e7b8782230d738424ff705/B/347a150d1a5944efb7db030c4b607378, 
hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/99318ad6c4e7b8782230d738424ff705/B/49c8d06ec32143ebbc1bf54bde6b3539, hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/99318ad6c4e7b8782230d738424ff705/B/3086dfc92c4146889b152ba4d62e4432, hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/99318ad6c4e7b8782230d738424ff705/B/08244e1936b346be9b07d80e3fe03b03] to archive 2024-12-16T17:57:00,336 DEBUG [StoreCloser-TestAcidGuarantees,,1734371794518.99318ad6c4e7b8782230d738424ff705.-1 {}] backup.HFileArchiver(363): Archiving compacted files. 2024-12-16T17:57:00,338 DEBUG [HFileArchiver-3 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/99318ad6c4e7b8782230d738424ff705/B/101adadc71214d0d82a7842965784165 to hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/archive/data/default/TestAcidGuarantees/99318ad6c4e7b8782230d738424ff705/B/101adadc71214d0d82a7842965784165 2024-12-16T17:57:00,338 DEBUG [HFileArchiver-7 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/99318ad6c4e7b8782230d738424ff705/B/08d4b8ae54014556a2449b481e7d4e8f to hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/archive/data/default/TestAcidGuarantees/99318ad6c4e7b8782230d738424ff705/B/08d4b8ae54014556a2449b481e7d4e8f 2024-12-16T17:57:00,338 DEBUG [HFileArchiver-8 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/99318ad6c4e7b8782230d738424ff705/B/733a372abe194589853feda1be9c6525 to hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/archive/data/default/TestAcidGuarantees/99318ad6c4e7b8782230d738424ff705/B/733a372abe194589853feda1be9c6525 2024-12-16T17:57:00,338 DEBUG [HFileArchiver-2 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/99318ad6c4e7b8782230d738424ff705/B/daede70eb63045e6a7abb121943d3e21 to hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/archive/data/default/TestAcidGuarantees/99318ad6c4e7b8782230d738424ff705/B/daede70eb63045e6a7abb121943d3e21 2024-12-16T17:57:00,339 DEBUG [HFileArchiver-1 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/99318ad6c4e7b8782230d738424ff705/B/43907416581c4567bfdf14e9de60b53c to hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/archive/data/default/TestAcidGuarantees/99318ad6c4e7b8782230d738424ff705/B/43907416581c4567bfdf14e9de60b53c 2024-12-16T17:57:00,339 DEBUG [HFileArchiver-5 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/99318ad6c4e7b8782230d738424ff705/B/87ad12d6fa474caa9e3cb9fb28257fb6 to 
hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/archive/data/default/TestAcidGuarantees/99318ad6c4e7b8782230d738424ff705/B/87ad12d6fa474caa9e3cb9fb28257fb6 2024-12-16T17:57:00,340 DEBUG [HFileArchiver-4 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/99318ad6c4e7b8782230d738424ff705/B/5644b2037651494ab24f116fdd99d0cc to hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/archive/data/default/TestAcidGuarantees/99318ad6c4e7b8782230d738424ff705/B/5644b2037651494ab24f116fdd99d0cc 2024-12-16T17:57:00,340 DEBUG [HFileArchiver-8 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/99318ad6c4e7b8782230d738424ff705/B/9d6a6d67921549e4bb83bb0cd5565eae to hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/archive/data/default/TestAcidGuarantees/99318ad6c4e7b8782230d738424ff705/B/9d6a6d67921549e4bb83bb0cd5565eae 2024-12-16T17:57:00,340 DEBUG [HFileArchiver-3 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/99318ad6c4e7b8782230d738424ff705/B/54defcbe352d42c08317b4f3b2a063e2 to hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/archive/data/default/TestAcidGuarantees/99318ad6c4e7b8782230d738424ff705/B/54defcbe352d42c08317b4f3b2a063e2 2024-12-16T17:57:00,340 DEBUG [HFileArchiver-2 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/99318ad6c4e7b8782230d738424ff705/B/7c7649317a6042bb8756e3e3a67b47f9 to hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/archive/data/default/TestAcidGuarantees/99318ad6c4e7b8782230d738424ff705/B/7c7649317a6042bb8756e3e3a67b47f9 2024-12-16T17:57:00,341 DEBUG [HFileArchiver-7 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/99318ad6c4e7b8782230d738424ff705/B/4c4d4d89e1d04455b7fd11b687c00e2f to hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/archive/data/default/TestAcidGuarantees/99318ad6c4e7b8782230d738424ff705/B/4c4d4d89e1d04455b7fd11b687c00e2f 2024-12-16T17:57:00,341 DEBUG [HFileArchiver-1 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/99318ad6c4e7b8782230d738424ff705/B/e384afab16b34fd3b855efd42db768d8 to hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/archive/data/default/TestAcidGuarantees/99318ad6c4e7b8782230d738424ff705/B/e384afab16b34fd3b855efd42db768d8 2024-12-16T17:57:00,342 DEBUG [HFileArchiver-5 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/99318ad6c4e7b8782230d738424ff705/B/22fcf8a783dc4409a73b1dc7f8aa7213 to 
hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/archive/data/default/TestAcidGuarantees/99318ad6c4e7b8782230d738424ff705/B/22fcf8a783dc4409a73b1dc7f8aa7213 2024-12-16T17:57:00,342 DEBUG [HFileArchiver-3 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/99318ad6c4e7b8782230d738424ff705/B/311e93ab16554161bc98b6d49e813c60 to hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/archive/data/default/TestAcidGuarantees/99318ad6c4e7b8782230d738424ff705/B/311e93ab16554161bc98b6d49e813c60 2024-12-16T17:57:00,342 DEBUG [HFileArchiver-2 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/99318ad6c4e7b8782230d738424ff705/B/edab2f3dc1a14829aed8187ebdc18bb8 to hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/archive/data/default/TestAcidGuarantees/99318ad6c4e7b8782230d738424ff705/B/edab2f3dc1a14829aed8187ebdc18bb8 2024-12-16T17:57:00,342 DEBUG [HFileArchiver-8 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/99318ad6c4e7b8782230d738424ff705/B/7f6b1ee7cace436b9ffa9007d42e850e to hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/archive/data/default/TestAcidGuarantees/99318ad6c4e7b8782230d738424ff705/B/7f6b1ee7cace436b9ffa9007d42e850e 2024-12-16T17:57:00,343 DEBUG [HFileArchiver-7 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/99318ad6c4e7b8782230d738424ff705/B/ecd1dc900f95461fa2e7f9e963aa4a40 to hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/archive/data/default/TestAcidGuarantees/99318ad6c4e7b8782230d738424ff705/B/ecd1dc900f95461fa2e7f9e963aa4a40 2024-12-16T17:57:00,344 DEBUG [HFileArchiver-2 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/99318ad6c4e7b8782230d738424ff705/B/05f444bf98ee420fa04c7adf211ea2e0 to hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/archive/data/default/TestAcidGuarantees/99318ad6c4e7b8782230d738424ff705/B/05f444bf98ee420fa04c7adf211ea2e0 2024-12-16T17:57:00,344 DEBUG [HFileArchiver-1 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/99318ad6c4e7b8782230d738424ff705/B/f6470d1101c54d378c62bdd31cf29f9f to hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/archive/data/default/TestAcidGuarantees/99318ad6c4e7b8782230d738424ff705/B/f6470d1101c54d378c62bdd31cf29f9f 2024-12-16T17:57:00,344 DEBUG [HFileArchiver-6 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/99318ad6c4e7b8782230d738424ff705/B/13c44e4d2dc242dc9b01ea664127fc43 to 
hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/archive/data/default/TestAcidGuarantees/99318ad6c4e7b8782230d738424ff705/B/13c44e4d2dc242dc9b01ea664127fc43 2024-12-16T17:57:00,345 DEBUG [HFileArchiver-7 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/99318ad6c4e7b8782230d738424ff705/B/5e1f7ad5baab4824bfb9337b1e32dcce to hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/archive/data/default/TestAcidGuarantees/99318ad6c4e7b8782230d738424ff705/B/5e1f7ad5baab4824bfb9337b1e32dcce 2024-12-16T17:57:00,345 DEBUG [HFileArchiver-8 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/99318ad6c4e7b8782230d738424ff705/B/7886f7480dc14798b6635992b16822dc to hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/archive/data/default/TestAcidGuarantees/99318ad6c4e7b8782230d738424ff705/B/7886f7480dc14798b6635992b16822dc 2024-12-16T17:57:00,345 DEBUG [HFileArchiver-5 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/99318ad6c4e7b8782230d738424ff705/B/fc315482f6ec492599ec159804df2a6f to hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/archive/data/default/TestAcidGuarantees/99318ad6c4e7b8782230d738424ff705/B/fc315482f6ec492599ec159804df2a6f 2024-12-16T17:57:00,345 DEBUG [HFileArchiver-3 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/99318ad6c4e7b8782230d738424ff705/B/36e315ee0cbe4cf58e10817126dcd534 to hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/archive/data/default/TestAcidGuarantees/99318ad6c4e7b8782230d738424ff705/B/36e315ee0cbe4cf58e10817126dcd534 2024-12-16T17:57:00,346 DEBUG [HFileArchiver-7 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/99318ad6c4e7b8782230d738424ff705/B/0ebd3c1330c94aef958895f152d669b5 to hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/archive/data/default/TestAcidGuarantees/99318ad6c4e7b8782230d738424ff705/B/0ebd3c1330c94aef958895f152d669b5 2024-12-16T17:57:00,347 DEBUG [HFileArchiver-8 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/99318ad6c4e7b8782230d738424ff705/B/3dcbace6c66f4bcd8e6088f429ee302b to hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/archive/data/default/TestAcidGuarantees/99318ad6c4e7b8782230d738424ff705/B/3dcbace6c66f4bcd8e6088f429ee302b 2024-12-16T17:57:00,347 DEBUG [HFileArchiver-3 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/99318ad6c4e7b8782230d738424ff705/B/bc9b525da1334cd08017fb4b69e7c90b to 
hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/archive/data/default/TestAcidGuarantees/99318ad6c4e7b8782230d738424ff705/B/bc9b525da1334cd08017fb4b69e7c90b 2024-12-16T17:57:00,347 DEBUG [HFileArchiver-2 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/99318ad6c4e7b8782230d738424ff705/B/296d2921ccbc4667ab2fdb1b8eba5cf7 to hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/archive/data/default/TestAcidGuarantees/99318ad6c4e7b8782230d738424ff705/B/296d2921ccbc4667ab2fdb1b8eba5cf7 2024-12-16T17:57:00,347 DEBUG [HFileArchiver-1 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/99318ad6c4e7b8782230d738424ff705/B/3dffe07d19304867a19791a7d9183b1d to hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/archive/data/default/TestAcidGuarantees/99318ad6c4e7b8782230d738424ff705/B/3dffe07d19304867a19791a7d9183b1d 2024-12-16T17:57:00,348 DEBUG [HFileArchiver-5 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/99318ad6c4e7b8782230d738424ff705/B/184ce2bb40ec45c9af9249134b720dbd to hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/archive/data/default/TestAcidGuarantees/99318ad6c4e7b8782230d738424ff705/B/184ce2bb40ec45c9af9249134b720dbd 2024-12-16T17:57:00,348 DEBUG [HFileArchiver-6 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/99318ad6c4e7b8782230d738424ff705/B/a4be1aa592074b17ae8f88edcde284ed to hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/archive/data/default/TestAcidGuarantees/99318ad6c4e7b8782230d738424ff705/B/a4be1aa592074b17ae8f88edcde284ed 2024-12-16T17:57:00,348 DEBUG [HFileArchiver-7 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/99318ad6c4e7b8782230d738424ff705/B/9ce6a910262e40e89f4a6a64220bfaf4 to hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/archive/data/default/TestAcidGuarantees/99318ad6c4e7b8782230d738424ff705/B/9ce6a910262e40e89f4a6a64220bfaf4 2024-12-16T17:57:00,349 DEBUG [HFileArchiver-8 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/99318ad6c4e7b8782230d738424ff705/B/7816c4a6778549efaa240ff5c6249d38 to hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/archive/data/default/TestAcidGuarantees/99318ad6c4e7b8782230d738424ff705/B/7816c4a6778549efaa240ff5c6249d38 2024-12-16T17:57:00,349 DEBUG [HFileArchiver-1 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/99318ad6c4e7b8782230d738424ff705/B/347a150d1a5944efb7db030c4b607378 to 
hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/archive/data/default/TestAcidGuarantees/99318ad6c4e7b8782230d738424ff705/B/347a150d1a5944efb7db030c4b607378 2024-12-16T17:57:00,350 DEBUG [HFileArchiver-2 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/99318ad6c4e7b8782230d738424ff705/B/cc17a241680b4c68a828deac73e09660 to hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/archive/data/default/TestAcidGuarantees/99318ad6c4e7b8782230d738424ff705/B/cc17a241680b4c68a828deac73e09660 2024-12-16T17:57:00,350 DEBUG [HFileArchiver-5 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/99318ad6c4e7b8782230d738424ff705/B/49c8d06ec32143ebbc1bf54bde6b3539 to hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/archive/data/default/TestAcidGuarantees/99318ad6c4e7b8782230d738424ff705/B/49c8d06ec32143ebbc1bf54bde6b3539 2024-12-16T17:57:00,350 DEBUG [HFileArchiver-3 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/99318ad6c4e7b8782230d738424ff705/B/e0ee819ee321472cbcb8781e556b24b3 to hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/archive/data/default/TestAcidGuarantees/99318ad6c4e7b8782230d738424ff705/B/e0ee819ee321472cbcb8781e556b24b3 2024-12-16T17:57:00,350 DEBUG [HFileArchiver-4 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/99318ad6c4e7b8782230d738424ff705/B/08244e1936b346be9b07d80e3fe03b03 to hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/archive/data/default/TestAcidGuarantees/99318ad6c4e7b8782230d738424ff705/B/08244e1936b346be9b07d80e3fe03b03 2024-12-16T17:57:00,350 DEBUG [HFileArchiver-6 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/99318ad6c4e7b8782230d738424ff705/B/3086dfc92c4146889b152ba4d62e4432 to hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/archive/data/default/TestAcidGuarantees/99318ad6c4e7b8782230d738424ff705/B/3086dfc92c4146889b152ba4d62e4432 2024-12-16T17:57:00,351 DEBUG [StoreCloser-TestAcidGuarantees,,1734371794518.99318ad6c4e7b8782230d738424ff705.-1 {}] regionserver.HStore(2316): Moving the files [hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/99318ad6c4e7b8782230d738424ff705/C/12d72cbd16064807829fbc7be4c42973, hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/99318ad6c4e7b8782230d738424ff705/C/84751958ddbb492fb1c2096effdef643, hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/99318ad6c4e7b8782230d738424ff705/C/118c401fe52b44db99ae7d6d6b092616, hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/99318ad6c4e7b8782230d738424ff705/C/93eb40b7206b4047928b9bfdb2a7024a, 
hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/99318ad6c4e7b8782230d738424ff705/C/fa96b4a4d12f4db5a3045a4a74327747, hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/99318ad6c4e7b8782230d738424ff705/C/5f05729b96e54a61b2441059c8fb456e, hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/99318ad6c4e7b8782230d738424ff705/C/8492014779154d899b20b0cf42ff3ca0, hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/99318ad6c4e7b8782230d738424ff705/C/1d755e8160be4b84928ba95be813cf7a, hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/99318ad6c4e7b8782230d738424ff705/C/920ad4d878264c03a9890a8d5ec4edfb, hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/99318ad6c4e7b8782230d738424ff705/C/307f6d1dc0ec4087b0538cd1ede94c5a, hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/99318ad6c4e7b8782230d738424ff705/C/70756b4aac4c469c9e1351a0fdd5f04a, hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/99318ad6c4e7b8782230d738424ff705/C/40ebe94e2aad4dcf8cbd1ef2c208233e, hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/99318ad6c4e7b8782230d738424ff705/C/d97c841578a540cf845fd7c40280017b, hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/99318ad6c4e7b8782230d738424ff705/C/3d5d332013304459b681ebba91dd22ff, hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/99318ad6c4e7b8782230d738424ff705/C/2bcabae6955648f0a29e5ef3b9ef4555, hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/99318ad6c4e7b8782230d738424ff705/C/69c32887809b456abb6b2c7d7f3c4903, hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/99318ad6c4e7b8782230d738424ff705/C/cfbb7888409c4717b349db432a105d6e, hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/99318ad6c4e7b8782230d738424ff705/C/4f67f22f2ef24477a403be93dc85fc8a, hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/99318ad6c4e7b8782230d738424ff705/C/6439325f4caf44969f9c3f2f78fdafb3, hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/99318ad6c4e7b8782230d738424ff705/C/dbfa675837164fdc948c7e4290a75e53, hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/99318ad6c4e7b8782230d738424ff705/C/c70d95ce77ea4ef0b0a494b1a57eca66, hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/99318ad6c4e7b8782230d738424ff705/C/7c69e1d4406649aabdb17364f9b830f7, hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/99318ad6c4e7b8782230d738424ff705/C/bb22ff509e6241b08a1abb50d02de61e, 
hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/99318ad6c4e7b8782230d738424ff705/C/9776a319c22f48eeb867f82c243066bb, hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/99318ad6c4e7b8782230d738424ff705/C/f15fac5870874dd6b56b706665d8e870, hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/99318ad6c4e7b8782230d738424ff705/C/71bd2ce10a484919aad29c96e6169555, hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/99318ad6c4e7b8782230d738424ff705/C/02de04e6205c4b1f9b4a2f967bda7692, hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/99318ad6c4e7b8782230d738424ff705/C/bc5e96e925f448b780c7277f72517751, hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/99318ad6c4e7b8782230d738424ff705/C/00f360fab86549949af0bdd2e12ad8ee, hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/99318ad6c4e7b8782230d738424ff705/C/3feeaffbf50243b39e1ca1814725da0a, hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/99318ad6c4e7b8782230d738424ff705/C/9ee9df5543da4795923e238b8e4424a0, hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/99318ad6c4e7b8782230d738424ff705/C/bd400da5893b4fed88ba2f0904f722a4, hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/99318ad6c4e7b8782230d738424ff705/C/a4fa8dd8cee5425297d5db6f589614ed, hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/99318ad6c4e7b8782230d738424ff705/C/0cc3cb5d88244d0bb30edb0f087f7c65, hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/99318ad6c4e7b8782230d738424ff705/C/311f34a4fd1a4b5a9c2066a6edeb0ea9, hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/99318ad6c4e7b8782230d738424ff705/C/8a57b1f4eb3643de96ef527efc73b5a7, hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/99318ad6c4e7b8782230d738424ff705/C/4661cbf97630442eacae8a9135e0be1a, hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/99318ad6c4e7b8782230d738424ff705/C/c08600e8e7aa42b1b63dcff6a65f10f0, hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/99318ad6c4e7b8782230d738424ff705/C/c876297b34a7434fbc7ecb15800231c2] to archive 2024-12-16T17:57:00,353 DEBUG [StoreCloser-TestAcidGuarantees,,1734371794518.99318ad6c4e7b8782230d738424ff705.-1 {}] backup.HFileArchiver(363): Archiving compacted files. 
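[Editor's note] The HFileArchiver DEBUG records above and below all follow the same pattern: a compacted store file under the test data root's data/default/... directory is re-created at the identical relative path under archive/data/default/... The following minimal sketch (plain Java, not HBase's actual HFileArchiver code; the class and method names are hypothetical) only illustrates that string-level path mapping, using one source path copied from the log line that follows.

```java
// A minimal sketch, assuming only that archived store files keep their
// relative path but move from <root>/data/... to <root>/archive/data/...,
// exactly as the HFileArchiver(620) log lines in this section show.
public class ArchivePathSketch {

    // Hypothetical helper: derive the archive location shown in the log
    // for a given store-file path under the data directory.
    static String toArchivePath(String storeFilePath) {
        return storeFilePath.replaceFirst("/data/default/", "/archive/data/default/");
    }

    public static void main(String[] args) {
        // Source path taken verbatim from the first HFileArchiver record below.
        String src = "hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/"
                + "data/default/TestAcidGuarantees/99318ad6c4e7b8782230d738424ff705/C/12d72cbd16064807829fbc7be4c42973";
        // Prints the same destination the log reports:
        // .../archive/data/default/TestAcidGuarantees/99318ad6c4e7b8782230d738424ff705/C/12d72cbd16064807829fbc7be4c42973
        System.out.println(toArchivePath(src));
    }
}
```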
2024-12-16T17:57:00,356 DEBUG [HFileArchiver-7 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/99318ad6c4e7b8782230d738424ff705/C/12d72cbd16064807829fbc7be4c42973 to hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/archive/data/default/TestAcidGuarantees/99318ad6c4e7b8782230d738424ff705/C/12d72cbd16064807829fbc7be4c42973 2024-12-16T17:57:00,356 DEBUG [HFileArchiver-8 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/99318ad6c4e7b8782230d738424ff705/C/84751958ddbb492fb1c2096effdef643 to hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/archive/data/default/TestAcidGuarantees/99318ad6c4e7b8782230d738424ff705/C/84751958ddbb492fb1c2096effdef643 2024-12-16T17:57:00,356 DEBUG [HFileArchiver-2 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/99318ad6c4e7b8782230d738424ff705/C/93eb40b7206b4047928b9bfdb2a7024a to hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/archive/data/default/TestAcidGuarantees/99318ad6c4e7b8782230d738424ff705/C/93eb40b7206b4047928b9bfdb2a7024a 2024-12-16T17:57:00,356 DEBUG [HFileArchiver-3 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/99318ad6c4e7b8782230d738424ff705/C/fa96b4a4d12f4db5a3045a4a74327747 to hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/archive/data/default/TestAcidGuarantees/99318ad6c4e7b8782230d738424ff705/C/fa96b4a4d12f4db5a3045a4a74327747 2024-12-16T17:57:00,356 DEBUG [HFileArchiver-1 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/99318ad6c4e7b8782230d738424ff705/C/118c401fe52b44db99ae7d6d6b092616 to hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/archive/data/default/TestAcidGuarantees/99318ad6c4e7b8782230d738424ff705/C/118c401fe52b44db99ae7d6d6b092616 2024-12-16T17:57:00,356 DEBUG [HFileArchiver-5 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/99318ad6c4e7b8782230d738424ff705/C/5f05729b96e54a61b2441059c8fb456e to hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/archive/data/default/TestAcidGuarantees/99318ad6c4e7b8782230d738424ff705/C/5f05729b96e54a61b2441059c8fb456e 2024-12-16T17:57:00,356 DEBUG [HFileArchiver-4 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/99318ad6c4e7b8782230d738424ff705/C/8492014779154d899b20b0cf42ff3ca0 to hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/archive/data/default/TestAcidGuarantees/99318ad6c4e7b8782230d738424ff705/C/8492014779154d899b20b0cf42ff3ca0 2024-12-16T17:57:00,356 DEBUG [HFileArchiver-6 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, 
hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/99318ad6c4e7b8782230d738424ff705/C/1d755e8160be4b84928ba95be813cf7a to hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/archive/data/default/TestAcidGuarantees/99318ad6c4e7b8782230d738424ff705/C/1d755e8160be4b84928ba95be813cf7a 2024-12-16T17:57:00,358 DEBUG [HFileArchiver-7 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/99318ad6c4e7b8782230d738424ff705/C/920ad4d878264c03a9890a8d5ec4edfb to hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/archive/data/default/TestAcidGuarantees/99318ad6c4e7b8782230d738424ff705/C/920ad4d878264c03a9890a8d5ec4edfb 2024-12-16T17:57:00,358 DEBUG [HFileArchiver-8 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/99318ad6c4e7b8782230d738424ff705/C/307f6d1dc0ec4087b0538cd1ede94c5a to hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/archive/data/default/TestAcidGuarantees/99318ad6c4e7b8782230d738424ff705/C/307f6d1dc0ec4087b0538cd1ede94c5a 2024-12-16T17:57:00,358 DEBUG [HFileArchiver-2 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/99318ad6c4e7b8782230d738424ff705/C/70756b4aac4c469c9e1351a0fdd5f04a to hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/archive/data/default/TestAcidGuarantees/99318ad6c4e7b8782230d738424ff705/C/70756b4aac4c469c9e1351a0fdd5f04a 2024-12-16T17:57:00,358 DEBUG [HFileArchiver-3 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/99318ad6c4e7b8782230d738424ff705/C/40ebe94e2aad4dcf8cbd1ef2c208233e to hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/archive/data/default/TestAcidGuarantees/99318ad6c4e7b8782230d738424ff705/C/40ebe94e2aad4dcf8cbd1ef2c208233e 2024-12-16T17:57:00,358 DEBUG [HFileArchiver-5 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/99318ad6c4e7b8782230d738424ff705/C/3d5d332013304459b681ebba91dd22ff to hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/archive/data/default/TestAcidGuarantees/99318ad6c4e7b8782230d738424ff705/C/3d5d332013304459b681ebba91dd22ff 2024-12-16T17:57:00,358 DEBUG [HFileArchiver-1 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/99318ad6c4e7b8782230d738424ff705/C/d97c841578a540cf845fd7c40280017b to hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/archive/data/default/TestAcidGuarantees/99318ad6c4e7b8782230d738424ff705/C/d97c841578a540cf845fd7c40280017b 2024-12-16T17:57:00,359 DEBUG [HFileArchiver-4 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, 
hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/99318ad6c4e7b8782230d738424ff705/C/2bcabae6955648f0a29e5ef3b9ef4555 to hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/archive/data/default/TestAcidGuarantees/99318ad6c4e7b8782230d738424ff705/C/2bcabae6955648f0a29e5ef3b9ef4555 2024-12-16T17:57:00,359 DEBUG [HFileArchiver-6 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/99318ad6c4e7b8782230d738424ff705/C/69c32887809b456abb6b2c7d7f3c4903 to hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/archive/data/default/TestAcidGuarantees/99318ad6c4e7b8782230d738424ff705/C/69c32887809b456abb6b2c7d7f3c4903 2024-12-16T17:57:00,360 DEBUG [HFileArchiver-7 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/99318ad6c4e7b8782230d738424ff705/C/cfbb7888409c4717b349db432a105d6e to hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/archive/data/default/TestAcidGuarantees/99318ad6c4e7b8782230d738424ff705/C/cfbb7888409c4717b349db432a105d6e 2024-12-16T17:57:00,360 DEBUG [HFileArchiver-2 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/99318ad6c4e7b8782230d738424ff705/C/6439325f4caf44969f9c3f2f78fdafb3 to hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/archive/data/default/TestAcidGuarantees/99318ad6c4e7b8782230d738424ff705/C/6439325f4caf44969f9c3f2f78fdafb3 2024-12-16T17:57:00,360 DEBUG [HFileArchiver-8 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/99318ad6c4e7b8782230d738424ff705/C/4f67f22f2ef24477a403be93dc85fc8a to hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/archive/data/default/TestAcidGuarantees/99318ad6c4e7b8782230d738424ff705/C/4f67f22f2ef24477a403be93dc85fc8a 2024-12-16T17:57:00,361 DEBUG [HFileArchiver-1 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/99318ad6c4e7b8782230d738424ff705/C/7c69e1d4406649aabdb17364f9b830f7 to hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/archive/data/default/TestAcidGuarantees/99318ad6c4e7b8782230d738424ff705/C/7c69e1d4406649aabdb17364f9b830f7 2024-12-16T17:57:00,361 DEBUG [HFileArchiver-3 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/99318ad6c4e7b8782230d738424ff705/C/dbfa675837164fdc948c7e4290a75e53 to hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/archive/data/default/TestAcidGuarantees/99318ad6c4e7b8782230d738424ff705/C/dbfa675837164fdc948c7e4290a75e53 2024-12-16T17:57:00,361 DEBUG [HFileArchiver-5 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, 
hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/99318ad6c4e7b8782230d738424ff705/C/c70d95ce77ea4ef0b0a494b1a57eca66 to hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/archive/data/default/TestAcidGuarantees/99318ad6c4e7b8782230d738424ff705/C/c70d95ce77ea4ef0b0a494b1a57eca66 2024-12-16T17:57:00,362 DEBUG [HFileArchiver-6 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/99318ad6c4e7b8782230d738424ff705/C/9776a319c22f48eeb867f82c243066bb to hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/archive/data/default/TestAcidGuarantees/99318ad6c4e7b8782230d738424ff705/C/9776a319c22f48eeb867f82c243066bb 2024-12-16T17:57:00,362 DEBUG [HFileArchiver-7 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/99318ad6c4e7b8782230d738424ff705/C/f15fac5870874dd6b56b706665d8e870 to hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/archive/data/default/TestAcidGuarantees/99318ad6c4e7b8782230d738424ff705/C/f15fac5870874dd6b56b706665d8e870 2024-12-16T17:57:00,363 DEBUG [HFileArchiver-2 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/99318ad6c4e7b8782230d738424ff705/C/71bd2ce10a484919aad29c96e6169555 to hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/archive/data/default/TestAcidGuarantees/99318ad6c4e7b8782230d738424ff705/C/71bd2ce10a484919aad29c96e6169555 2024-12-16T17:57:00,363 DEBUG [HFileArchiver-4 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/99318ad6c4e7b8782230d738424ff705/C/bb22ff509e6241b08a1abb50d02de61e to hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/archive/data/default/TestAcidGuarantees/99318ad6c4e7b8782230d738424ff705/C/bb22ff509e6241b08a1abb50d02de61e 2024-12-16T17:57:00,363 DEBUG [HFileArchiver-8 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/99318ad6c4e7b8782230d738424ff705/C/02de04e6205c4b1f9b4a2f967bda7692 to hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/archive/data/default/TestAcidGuarantees/99318ad6c4e7b8782230d738424ff705/C/02de04e6205c4b1f9b4a2f967bda7692 2024-12-16T17:57:00,364 DEBUG [HFileArchiver-3 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/99318ad6c4e7b8782230d738424ff705/C/00f360fab86549949af0bdd2e12ad8ee to hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/archive/data/default/TestAcidGuarantees/99318ad6c4e7b8782230d738424ff705/C/00f360fab86549949af0bdd2e12ad8ee 2024-12-16T17:57:00,364 DEBUG [HFileArchiver-7 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, 
hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/99318ad6c4e7b8782230d738424ff705/C/bd400da5893b4fed88ba2f0904f722a4 to hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/archive/data/default/TestAcidGuarantees/99318ad6c4e7b8782230d738424ff705/C/bd400da5893b4fed88ba2f0904f722a4 2024-12-16T17:57:00,364 DEBUG [HFileArchiver-6 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/99318ad6c4e7b8782230d738424ff705/C/9ee9df5543da4795923e238b8e4424a0 to hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/archive/data/default/TestAcidGuarantees/99318ad6c4e7b8782230d738424ff705/C/9ee9df5543da4795923e238b8e4424a0 2024-12-16T17:57:00,364 DEBUG [HFileArchiver-1 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/99318ad6c4e7b8782230d738424ff705/C/bc5e96e925f448b780c7277f72517751 to hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/archive/data/default/TestAcidGuarantees/99318ad6c4e7b8782230d738424ff705/C/bc5e96e925f448b780c7277f72517751 2024-12-16T17:57:00,365 DEBUG [HFileArchiver-5 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/99318ad6c4e7b8782230d738424ff705/C/3feeaffbf50243b39e1ca1814725da0a to hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/archive/data/default/TestAcidGuarantees/99318ad6c4e7b8782230d738424ff705/C/3feeaffbf50243b39e1ca1814725da0a 2024-12-16T17:57:00,365 DEBUG [HFileArchiver-2 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/99318ad6c4e7b8782230d738424ff705/C/a4fa8dd8cee5425297d5db6f589614ed to hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/archive/data/default/TestAcidGuarantees/99318ad6c4e7b8782230d738424ff705/C/a4fa8dd8cee5425297d5db6f589614ed 2024-12-16T17:57:00,365 DEBUG [HFileArchiver-8 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/99318ad6c4e7b8782230d738424ff705/C/311f34a4fd1a4b5a9c2066a6edeb0ea9 to hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/archive/data/default/TestAcidGuarantees/99318ad6c4e7b8782230d738424ff705/C/311f34a4fd1a4b5a9c2066a6edeb0ea9 2024-12-16T17:57:00,366 DEBUG [HFileArchiver-4 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/99318ad6c4e7b8782230d738424ff705/C/0cc3cb5d88244d0bb30edb0f087f7c65 to hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/archive/data/default/TestAcidGuarantees/99318ad6c4e7b8782230d738424ff705/C/0cc3cb5d88244d0bb30edb0f087f7c65 2024-12-16T17:57:00,366 DEBUG [HFileArchiver-3 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, 
hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/99318ad6c4e7b8782230d738424ff705/C/8a57b1f4eb3643de96ef527efc73b5a7 to hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/archive/data/default/TestAcidGuarantees/99318ad6c4e7b8782230d738424ff705/C/8a57b1f4eb3643de96ef527efc73b5a7 2024-12-16T17:57:00,366 DEBUG [HFileArchiver-7 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/99318ad6c4e7b8782230d738424ff705/C/4661cbf97630442eacae8a9135e0be1a to hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/archive/data/default/TestAcidGuarantees/99318ad6c4e7b8782230d738424ff705/C/4661cbf97630442eacae8a9135e0be1a 2024-12-16T17:57:00,366 DEBUG [HFileArchiver-6 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/99318ad6c4e7b8782230d738424ff705/C/c08600e8e7aa42b1b63dcff6a65f10f0 to hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/archive/data/default/TestAcidGuarantees/99318ad6c4e7b8782230d738424ff705/C/c08600e8e7aa42b1b63dcff6a65f10f0 2024-12-16T17:57:00,366 DEBUG [HFileArchiver-1 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/99318ad6c4e7b8782230d738424ff705/C/c876297b34a7434fbc7ecb15800231c2 to hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/archive/data/default/TestAcidGuarantees/99318ad6c4e7b8782230d738424ff705/C/c876297b34a7434fbc7ecb15800231c2 2024-12-16T17:57:00,371 DEBUG [RS_CLOSE_REGION-regionserver/3609ad07831c:0-0 {event_type=M_RS_CLOSE_REGION, pid=31}] wal.WALSplitUtil(409): Wrote file=hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/99318ad6c4e7b8782230d738424ff705/recovered.edits/582.seqid, newMaxSeqId=582, maxSeqId=1 2024-12-16T17:57:00,374 INFO [RS_CLOSE_REGION-regionserver/3609ad07831c:0-0 {event_type=M_RS_CLOSE_REGION, pid=31}] regionserver.HRegion(1922): Closed TestAcidGuarantees,,1734371794518.99318ad6c4e7b8782230d738424ff705. 
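The HFileArchiver records above all follow the same pattern: a store file under the cluster root's data/ tree is moved to the same relative location under archive/. The following is a minimal illustrative sketch of that path mapping only, assuming the root directory layout visible in these log lines; it is not HBase's actual HFileArchiver implementation.

    // Toy helper: derive the archive location shown in the log lines above
    // (".../data/default/<table>/<region>/<cf>/<hfile>" is moved under
    //  ".../archive/data/default/<table>/<region>/<cf>/<hfile>").
    import org.apache.hadoop.fs.Path;

    public final class ArchivePathSketch {
      /** rootDir is the cluster root (the "cb3f7d40-..." test-data dir); storeFile lives under rootDir/data. */
      static Path archiveLocation(Path rootDir, Path storeFile) {
        String relative = storeFile.toUri().getPath()
            .substring(rootDir.toUri().getPath().length() + 1);   // "data/default/<table>/<region>/<cf>/<hfile>"
        return new Path(new Path(rootDir, "archive"), relative);  // ".../archive/data/default/..."
      }

      public static void main(String[] args) {
        Path root = new Path("hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4");
        Path store = new Path(root,
            "data/default/TestAcidGuarantees/99318ad6c4e7b8782230d738424ff705/C/12d72cbd16064807829fbc7be4c42973");
        System.out.println(archiveLocation(root, store));  // prints the archive path seen in the first record above
      }
    }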
2024-12-16T17:57:00,374 DEBUG [RS_CLOSE_REGION-regionserver/3609ad07831c:0-0 {event_type=M_RS_CLOSE_REGION, pid=31}] regionserver.HRegion(1635): Region close journal for 99318ad6c4e7b8782230d738424ff705: 2024-12-16T17:57:00,376 INFO [RS_CLOSE_REGION-regionserver/3609ad07831c:0-0 {event_type=M_RS_CLOSE_REGION, pid=31}] handler.UnassignRegionHandler(170): Closed 99318ad6c4e7b8782230d738424ff705 2024-12-16T17:57:00,376 INFO [PEWorker-4 {}] assignment.RegionStateStore(202): pid=30 updating hbase:meta row=99318ad6c4e7b8782230d738424ff705, regionState=CLOSED 2024-12-16T17:57:00,380 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=31, resume processing ppid=30 2024-12-16T17:57:00,380 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=31, ppid=30, state=SUCCESS; CloseRegionProcedure 99318ad6c4e7b8782230d738424ff705, server=3609ad07831c,39733,1734371789085 in 1.5210 sec 2024-12-16T17:57:00,382 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=30, resume processing ppid=29 2024-12-16T17:57:00,382 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=30, ppid=29, state=SUCCESS; TransitRegionStateProcedure table=TestAcidGuarantees, region=99318ad6c4e7b8782230d738424ff705, UNASSIGN in 1.5250 sec 2024-12-16T17:57:00,384 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=29, resume processing ppid=28 2024-12-16T17:57:00,384 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=29, ppid=28, state=SUCCESS; CloseTableRegionsProcedure table=TestAcidGuarantees in 1.5320 sec 2024-12-16T17:57:00,386 DEBUG [PEWorker-1 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":1,"row":"TestAcidGuarantees","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1734371820385"}]},"ts":"1734371820385"} 2024-12-16T17:57:00,387 INFO [PEWorker-1 {}] hbase.MetaTableAccessor(1655): Updated tableName=TestAcidGuarantees, state=DISABLED in hbase:meta 2024-12-16T17:57:00,424 INFO [PEWorker-1 {}] procedure.DisableTableProcedure(296): Set TestAcidGuarantees to state=DISABLED 2024-12-16T17:57:00,426 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=28, state=SUCCESS; DisableTableProcedure table=TestAcidGuarantees in 1.5900 sec 2024-12-16T17:57:00,950 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38367 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=28 2024-12-16T17:57:00,950 INFO [Time-limited test {}] client.HBaseAdmin$TableFuture(3751): Operation: DISABLE, Table Name: default:TestAcidGuarantees, procId: 28 completed 2024-12-16T17:57:00,955 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38367 {}] master.HMaster$5(2505): Client=jenkins//172.17.0.2 delete TestAcidGuarantees 2024-12-16T17:57:00,962 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38367 {}] procedure2.ProcedureExecutor(1098): Stored pid=32, state=RUNNABLE:DELETE_TABLE_PRE_OPERATION; DeleteTableProcedure table=TestAcidGuarantees 2024-12-16T17:57:00,964 DEBUG [PEWorker-5 {}] procedure.DeleteTableProcedure(103): Waiting for RIT for pid=32, state=RUNNABLE:DELETE_TABLE_PRE_OPERATION, locked=true; DeleteTableProcedure table=TestAcidGuarantees 2024-12-16T17:57:00,965 DEBUG [PEWorker-5 {}] procedure.DeleteTableProcedure(115): Deleting regions from filesystem for pid=32, state=RUNNABLE:DELETE_TABLE_CLEAR_FS_LAYOUT, locked=true; DeleteTableProcedure table=TestAcidGuarantees 2024-12-16T17:57:00,965 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38367 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=32 2024-12-16T17:57:00,967 DEBUG [HFileArchiver-5 {}] backup.HFileArchiver(133): ARCHIVING hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/99318ad6c4e7b8782230d738424ff705 2024-12-16T17:57:00,972 DEBUG [HFileArchiver-5 {}] backup.HFileArchiver(161): Archiving [FileablePath, hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/99318ad6c4e7b8782230d738424ff705/A, FileablePath, hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/99318ad6c4e7b8782230d738424ff705/B, FileablePath, hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/99318ad6c4e7b8782230d738424ff705/C, FileablePath, hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/99318ad6c4e7b8782230d738424ff705/recovered.edits] 2024-12-16T17:57:00,976 DEBUG [HFileArchiver-8 {}] backup.HFileArchiver(620): Archived from FileablePath, hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/99318ad6c4e7b8782230d738424ff705/A/905722f361254254b6ed15f0ef7579b6 to hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/archive/data/default/TestAcidGuarantees/99318ad6c4e7b8782230d738424ff705/A/905722f361254254b6ed15f0ef7579b6 2024-12-16T17:57:00,976 DEBUG [HFileArchiver-2 {}] backup.HFileArchiver(620): Archived from FileablePath, hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/99318ad6c4e7b8782230d738424ff705/A/0325f26a76384a8ab469221d10150e59 to hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/archive/data/default/TestAcidGuarantees/99318ad6c4e7b8782230d738424ff705/A/0325f26a76384a8ab469221d10150e59 2024-12-16T17:57:00,976 DEBUG [HFileArchiver-4 {}] backup.HFileArchiver(620): Archived from FileablePath, hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/99318ad6c4e7b8782230d738424ff705/A/a5679c36de62474bb0fe55881e1731ca to hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/archive/data/default/TestAcidGuarantees/99318ad6c4e7b8782230d738424ff705/A/a5679c36de62474bb0fe55881e1731ca 2024-12-16T17:57:00,979 DEBUG [HFileArchiver-3 {}] backup.HFileArchiver(620): Archived from FileablePath, hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/99318ad6c4e7b8782230d738424ff705/B/09124cbb097744dfbfd7853ac8f89f9a to hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/archive/data/default/TestAcidGuarantees/99318ad6c4e7b8782230d738424ff705/B/09124cbb097744dfbfd7853ac8f89f9a 2024-12-16T17:57:00,979 DEBUG [HFileArchiver-7 {}] backup.HFileArchiver(620): Archived from FileablePath, hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/99318ad6c4e7b8782230d738424ff705/B/28d1d7a9ded64322a709fbad329e0bdd to hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/archive/data/default/TestAcidGuarantees/99318ad6c4e7b8782230d738424ff705/B/28d1d7a9ded64322a709fbad329e0bdd 
2024-12-16T17:57:00,979 DEBUG [HFileArchiver-6 {}] backup.HFileArchiver(620): Archived from FileablePath, hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/99318ad6c4e7b8782230d738424ff705/B/44811e7e931545919aae4110d6987fd6 to hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/archive/data/default/TestAcidGuarantees/99318ad6c4e7b8782230d738424ff705/B/44811e7e931545919aae4110d6987fd6 2024-12-16T17:57:00,982 DEBUG [HFileArchiver-2 {}] backup.HFileArchiver(620): Archived from FileablePath, hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/99318ad6c4e7b8782230d738424ff705/C/54b8ce8a56a7428e8a3ff6c7e8067932 to hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/archive/data/default/TestAcidGuarantees/99318ad6c4e7b8782230d738424ff705/C/54b8ce8a56a7428e8a3ff6c7e8067932 2024-12-16T17:57:00,982 DEBUG [HFileArchiver-8 {}] backup.HFileArchiver(620): Archived from FileablePath, hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/99318ad6c4e7b8782230d738424ff705/C/1fa90e254eb74cae87176a64a4e466b2 to hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/archive/data/default/TestAcidGuarantees/99318ad6c4e7b8782230d738424ff705/C/1fa90e254eb74cae87176a64a4e466b2 2024-12-16T17:57:00,982 DEBUG [HFileArchiver-1 {}] backup.HFileArchiver(620): Archived from FileablePath, hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/99318ad6c4e7b8782230d738424ff705/C/0f2df3931fda4bdcae88f4ac3565e3d7 to hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/archive/data/default/TestAcidGuarantees/99318ad6c4e7b8782230d738424ff705/C/0f2df3931fda4bdcae88f4ac3565e3d7 2024-12-16T17:57:00,985 DEBUG [HFileArchiver-4 {}] backup.HFileArchiver(620): Archived from FileablePath, hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/99318ad6c4e7b8782230d738424ff705/recovered.edits/582.seqid to hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/archive/data/default/TestAcidGuarantees/99318ad6c4e7b8782230d738424ff705/recovered.edits/582.seqid 2024-12-16T17:57:00,986 DEBUG [HFileArchiver-5 {}] backup.HFileArchiver(634): Deleted hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/99318ad6c4e7b8782230d738424ff705 2024-12-16T17:57:00,986 DEBUG [PEWorker-5 {}] procedure.DeleteTableProcedure(313): Archived TestAcidGuarantees regions 2024-12-16T17:57:00,991 DEBUG [PEWorker-5 {}] procedure.DeleteTableProcedure(120): Deleting regions from META for pid=32, state=RUNNABLE:DELETE_TABLE_REMOVE_FROM_META, locked=true; DeleteTableProcedure table=TestAcidGuarantees 2024-12-16T17:57:00,994 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=39733 {}] util.ReflectedFunctionCache(97): Populated cache for org.apache.hadoop.hbase.filter.KeyOnlyFilter in 0ms 2024-12-16T17:57:00,997 WARN [PEWorker-5 {}] procedure.DeleteTableProcedure(371): Deleting some vestigial 1 rows of TestAcidGuarantees from hbase:meta 2024-12-16T17:57:01,023 DEBUG [PEWorker-5 {}] procedure.DeleteTableProcedure(408): Removing 'TestAcidGuarantees' descriptor. 
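The DISABLE (pid=28, DisableTableProcedure) and DELETE (pid=32, DeleteTableProcedure) procedures logged here are driven by ordinary client calls. As a hedged sketch only, the standard HBase 2.x Admin sequence looks like the following; the configuration and connection setup is assumed, not copied from the test harness.

    // Minimal client-side sketch of the disable/delete sequence behind the procedures above.
    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;

    public final class DropTableSketch {
      public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();
        try (Connection connection = ConnectionFactory.createConnection(conf);
             Admin admin = connection.getAdmin()) {
          TableName table = TableName.valueOf("TestAcidGuarantees");
          if (admin.isTableEnabled(table)) {
            admin.disableTable(table);   // master runs DisableTableProcedure: regions are unassigned
          }
          admin.deleteTable(table);      // master runs DeleteTableProcedure: files archived, hbase:meta cleaned
        }
      }
    }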
2024-12-16T17:57:01,024 DEBUG [PEWorker-5 {}] procedure.DeleteTableProcedure(126): Deleting assignment state for pid=32, state=RUNNABLE:DELETE_TABLE_UNASSIGN_REGIONS, locked=true; DeleteTableProcedure table=TestAcidGuarantees 2024-12-16T17:57:01,024 DEBUG [PEWorker-5 {}] procedure.DeleteTableProcedure(398): Removing 'TestAcidGuarantees' from region states. 2024-12-16T17:57:01,024 DEBUG [PEWorker-5 {}] hbase.MetaTableAccessor(2113): Delete {"totalColumns":1,"row":"TestAcidGuarantees,,1734371794518.99318ad6c4e7b8782230d738424ff705.","families":{"info":[{"qualifier":"","vlen":0,"tag":[],"timestamp":"1734371821024"}]},"ts":"9223372036854775807"} 2024-12-16T17:57:01,028 INFO [PEWorker-5 {}] hbase.MetaTableAccessor(1808): Deleted 1 regions from META 2024-12-16T17:57:01,028 DEBUG [PEWorker-5 {}] hbase.MetaTableAccessor(1809): Deleted regions: [{ENCODED => 99318ad6c4e7b8782230d738424ff705, NAME => 'TestAcidGuarantees,,1734371794518.99318ad6c4e7b8782230d738424ff705.', STARTKEY => '', ENDKEY => ''}] 2024-12-16T17:57:01,028 DEBUG [PEWorker-5 {}] procedure.DeleteTableProcedure(402): Marking 'TestAcidGuarantees' as deleted. 2024-12-16T17:57:01,028 DEBUG [PEWorker-5 {}] hbase.MetaTableAccessor(2113): Delete {"totalColumns":1,"row":"TestAcidGuarantees","families":{"table":[{"qualifier":"state","vlen":0,"tag":[],"timestamp":"1734371821028"}]},"ts":"9223372036854775807"} 2024-12-16T17:57:01,031 INFO [PEWorker-5 {}] hbase.MetaTableAccessor(1678): Deleted table TestAcidGuarantees state from META 2024-12-16T17:57:01,059 DEBUG [PEWorker-5 {}] procedure.DeleteTableProcedure(133): Finished pid=32, state=RUNNABLE:DELETE_TABLE_POST_OPERATION, locked=true; DeleteTableProcedure table=TestAcidGuarantees 2024-12-16T17:57:01,060 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=32, state=SUCCESS; DeleteTableProcedure table=TestAcidGuarantees in 102 msec 2024-12-16T17:57:01,066 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38367 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=32 2024-12-16T17:57:01,066 INFO [Time-limited test {}] client.HBaseAdmin$TableFuture(3751): Operation: DELETE, Table Name: default:TestAcidGuarantees, procId: 32 completed 2024-12-16T17:57:01,078 INFO [Time-limited test {}] hbase.ResourceChecker(175): after: TestAcidGuaranteesWithAdaptivePolicy#testMixedAtomicity Thread=247 (was 219) Potentially hanging thread: HFileArchiver-3 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HFileArchiver-8 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) 
java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HFileArchiver-6 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: DataXceiver for client DFSClient_NONMAPREDUCE_-389028407_22 at /127.0.0.1:51414 [Waiting for operation #28] java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:335) app//org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) java.base@17.0.11/java.io.BufferedInputStream.fill(BufferedInputStream.java:244) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:263) java.base@17.0.11/java.io.DataInputStream.readUnsignedShort(DataInputStream.java:334) java.base@17.0.11/java.io.DataInputStream.readShort(DataInputStream.java:312) app//org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.readOp(Receiver.java:72) app//org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:273) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: hconnection-0xf95a041-shared-pool-7 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HFileArchiver-4 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) 
java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RS_OPEN_REGION-regionserver/3609ad07831c:0-0 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RPCClient-NioEventLoopGroup-4-9 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RPCClient-NioEventLoopGroup-4-12 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) 
app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RSProcedureDispatcher-pool-2 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RS_CLOSE_REGION-regionserver/3609ad07831c:0-0 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RPCClient-NioEventLoopGroup-4-4 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RPCClient-NioEventLoopGroup-4-6 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) 
java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RPCClient-NioEventLoopGroup-4-5 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-1 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HFileArchiver-5 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) 
java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: hconnection-0xf95a041-shared-pool-6 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: Timer for 'HBase' metrics system java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.util.TimerThread.mainLoop(Timer.java:563) java.base@17.0.11/java.util.TimerThread.run(Timer.java:516) Potentially hanging thread: RPCClient-NioEventLoopGroup-4-8 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RS:0;3609ad07831c:39733-shortCompactions-0 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.PriorityBlockingQueue.take(PriorityBlockingQueue.java:535) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) 
java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-0 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RPCClient-NioEventLoopGroup-4-3 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: DataXceiver for client DFSClient_NONMAPREDUCE_-389028407_22 at /127.0.0.1:53006 [Waiting for operation #388] java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:335) app//org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) java.base@17.0.11/java.io.BufferedInputStream.fill(BufferedInputStream.java:244) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:263) java.base@17.0.11/java.io.DataInputStream.readUnsignedShort(DataInputStream.java:334) java.base@17.0.11/java.io.DataInputStream.readShort(DataInputStream.java:312) app//org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.readOp(Receiver.java:72) 
app//org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:273) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: hconnection-0xf95a041-shared-pool-5 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HFileArchiver-2 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: DataXceiver for client DFSClient_NONMAPREDUCE_-145401003_22 at /127.0.0.1:38494 [Waiting for operation #372] java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:335) app//org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) java.base@17.0.11/java.io.BufferedInputStream.fill(BufferedInputStream.java:244) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:263) java.base@17.0.11/java.io.DataInputStream.readUnsignedShort(DataInputStream.java:334) java.base@17.0.11/java.io.DataInputStream.readShort(DataInputStream.java:312) app//org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.readOp(Receiver.java:72) app//org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:273) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HFileArchiver-1 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) 
java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RPCClient-NioEventLoopGroup-4-10 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RPCClient-NioEventLoopGroup-4-7 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RS-EventLoopGroup-1-3 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: hconnection-0xf95a041-shared-pool-4 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) 
java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RPCClient-NioEventLoopGroup-4-13 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RPCClient-NioEventLoopGroup-4-2 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:62) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:883) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HFileArchiver-7 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) 
java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-2 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RPCClient-NioEventLoopGroup-4-11 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) - Thread LEAK? -, OpenFileDescriptor=458 (was 444) - OpenFileDescriptor LEAK? -, MaxFileDescriptor=1048576 (was 1048576), SystemLoadAverage=221 (was 120) - SystemLoadAverage LEAK? -, ProcessCount=11 (was 11), AvailableMemoryMB=3690 (was 4233) 2024-12-16T17:57:01,086 INFO [Time-limited test {}] hbase.ResourceChecker(147): before: TestAcidGuaranteesWithAdaptivePolicy#testMobMixedAtomicity Thread=247, OpenFileDescriptor=458, MaxFileDescriptor=1048576, SystemLoadAverage=221, ProcessCount=11, AvailableMemoryMB=3690 2024-12-16T17:57:01,088 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38367 {}] util.TableDescriptorChecker(321): MEMSTORE_FLUSHSIZE for table descriptor or "hbase.hregion.memstore.flush.size" (131072) is too small, which might cause very frequent flushing. 
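The ResourceChecker summary and the TableDescriptorChecker warning above both reflect the test's deliberately aggressive flush settings: 131072 bytes is a 128 KB flush size, far below the 128 MB default. Whether the value comes from the table's MEMSTORE_FLUSHSIZE attribute or from the configuration key named in the warning is not visible here; the sketch below is a minimal, assumed configuration-side way of producing the same value.

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;

    public class TinyFlushSizeConfig {
        public static void main(String[] args) {
            // Start from the default HBase configuration.
            Configuration conf = HBaseConfiguration.create();

            // Force very small memstore flushes: 128 KB (131072 bytes), the value the
            // TableDescriptorChecker warning above complains about.
            conf.setLong("hbase.hregion.memstore.flush.size", 128 * 1024);

            System.out.println("flush.size = "
                + conf.getLong("hbase.hregion.memstore.flush.size", -1));
        }
    }

With a limit this small, a few kilobytes of writes are enough to trigger a flush, which is the very frequent flushing the warning anticipates.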
2024-12-16T17:57:01,088 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38367 {}] master.HMaster$4(2389): Client=jenkins//172.17.0.2 create 'TestAcidGuarantees', {TABLE_ATTRIBUTES => {METADATA => {'hbase.hregion.compacting.memstore.type' => 'ADAPTIVE'}}}, {NAME => 'A', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'B', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'C', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-12-16T17:57:01,090 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38367 {}] procedure2.ProcedureExecutor(1098): Stored pid=33, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION; CreateTableProcedure table=TestAcidGuarantees 2024-12-16T17:57:01,090 INFO [PEWorker-4 {}] procedure.CreateTableProcedure(89): pid=33, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, locked=true; CreateTableProcedure table=TestAcidGuarantees execute state=CREATE_TABLE_PRE_OPERATION 2024-12-16T17:57:01,091 DEBUG [PEWorker-4 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-16T17:57:01,091 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38367 {}] master.MasterRpcServices(713): Client=jenkins//172.17.0.2 procedure request for creating table: namespace: "default" qualifier: "TestAcidGuarantees" procId is: 33 2024-12-16T17:57:01,091 INFO [PEWorker-4 {}] procedure.CreateTableProcedure(89): pid=33, state=RUNNABLE:CREATE_TABLE_WRITE_FS_LAYOUT, locked=true; CreateTableProcedure table=TestAcidGuarantees execute state=CREATE_TABLE_WRITE_FS_LAYOUT 2024-12-16T17:57:01,092 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38367 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=33 2024-12-16T17:57:01,098 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41817 is added to blk_1073741965_1141 (size=963) 2024-12-16T17:57:01,194 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38367 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=33 2024-12-16T17:57:01,396 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38367 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=33 2024-12-16T17:57:01,505 INFO [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(7106): creating {ENCODED => 7212a7dec92fa5781081695b56d809ad, NAME => 'TestAcidGuarantees,,1734371821088.7212a7dec92fa5781081695b56d809ad.', STARTKEY => '', ENDKEY => ''}, tableDescriptor='TestAcidGuarantees', {TABLE_ATTRIBUTES => {METADATA => {'hbase.hregion.compacting.memstore.type' => 'ADAPTIVE', 'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'A', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', 
KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'B', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'C', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, regionDir=hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4 2024-12-16T17:57:01,513 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41817 is added to blk_1073741966_1142 (size=53) 2024-12-16T17:57:01,698 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38367 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=33 2024-12-16T17:57:01,916 DEBUG [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(894): Instantiated TestAcidGuarantees,,1734371821088.7212a7dec92fa5781081695b56d809ad.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-16T17:57:01,916 DEBUG [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(1681): Closing 7212a7dec92fa5781081695b56d809ad, disabling compactions & flushes 2024-12-16T17:57:01,916 INFO [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(1703): Closing region TestAcidGuarantees,,1734371821088.7212a7dec92fa5781081695b56d809ad. 2024-12-16T17:57:01,916 DEBUG [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(1724): Waiting without time limit for close lock on TestAcidGuarantees,,1734371821088.7212a7dec92fa5781081695b56d809ad. 2024-12-16T17:57:01,916 DEBUG [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(1791): Acquired close lock on TestAcidGuarantees,,1734371821088.7212a7dec92fa5781081695b56d809ad. after waiting 0 ms 2024-12-16T17:57:01,916 DEBUG [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(1801): Updates disabled for region TestAcidGuarantees,,1734371821088.7212a7dec92fa5781081695b56d809ad. 2024-12-16T17:57:01,917 INFO [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(1922): Closed TestAcidGuarantees,,1734371821088.7212a7dec92fa5781081695b56d809ad. 
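The CreateTableProcedure above (pid=33) materialises a descriptor with ADAPTIVE in-memory compaction and three single-version families A, B and C. A minimal client-side sketch that would request an equivalent table through the HBase 2.x Admin API is shown below; the connection setup is assumed, and only the attributes visible in the logged descriptor are reproduced.

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;
    import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
    import org.apache.hadoop.hbase.util.Bytes;

    public class CreateAcidTable {
        public static void main(String[] args) throws Exception {
            Configuration conf = HBaseConfiguration.create();
            try (Connection conn = ConnectionFactory.createConnection(conf);
                 Admin admin = conn.getAdmin()) {

                TableDescriptorBuilder tdb = TableDescriptorBuilder
                    .newBuilder(TableName.valueOf("TestAcidGuarantees"))
                    // Matches the METADATA attribute in the logged descriptor.
                    .setValue("hbase.hregion.compacting.memstore.type", "ADAPTIVE");

                // Three column families A, B, C, each keeping a single version.
                for (String family : new String[] { "A", "B", "C" }) {
                    tdb.setColumnFamily(
                        ColumnFamilyDescriptorBuilder.newBuilder(Bytes.toBytes(family))
                            .setMaxVersions(1)
                            .build());
                }

                // Blocks until the create procedure completes; internally the client
                // keeps polling the master, which is the repeated
                // "Checking to see if procedure is done pid=33" seen above.
                admin.createTable(tdb.build());
            }
        }
    }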
2024-12-16T17:57:01,917 DEBUG [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(1635): Region close journal for 7212a7dec92fa5781081695b56d809ad: 2024-12-16T17:57:01,920 INFO [PEWorker-4 {}] procedure.CreateTableProcedure(89): pid=33, state=RUNNABLE:CREATE_TABLE_ADD_TO_META, locked=true; CreateTableProcedure table=TestAcidGuarantees execute state=CREATE_TABLE_ADD_TO_META 2024-12-16T17:57:01,920 DEBUG [PEWorker-4 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":2,"row":"TestAcidGuarantees,,1734371821088.7212a7dec92fa5781081695b56d809ad.","families":{"info":[{"qualifier":"regioninfo","vlen":52,"tag":[],"timestamp":"1734371821920"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1734371821920"}]},"ts":"1734371821920"} 2024-12-16T17:57:01,924 INFO [PEWorker-4 {}] hbase.MetaTableAccessor(1516): Added 1 regions to meta. 2024-12-16T17:57:01,925 INFO [PEWorker-4 {}] procedure.CreateTableProcedure(89): pid=33, state=RUNNABLE:CREATE_TABLE_ASSIGN_REGIONS, locked=true; CreateTableProcedure table=TestAcidGuarantees execute state=CREATE_TABLE_ASSIGN_REGIONS 2024-12-16T17:57:01,925 DEBUG [PEWorker-4 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":1,"row":"TestAcidGuarantees","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1734371821925"}]},"ts":"1734371821925"} 2024-12-16T17:57:01,926 INFO [PEWorker-4 {}] hbase.MetaTableAccessor(1655): Updated tableName=TestAcidGuarantees, state=ENABLING in hbase:meta 2024-12-16T17:57:02,042 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=34, ppid=33, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE; TransitRegionStateProcedure table=TestAcidGuarantees, region=7212a7dec92fa5781081695b56d809ad, ASSIGN}] 2024-12-16T17:57:02,044 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=34, ppid=33, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE; TransitRegionStateProcedure table=TestAcidGuarantees, region=7212a7dec92fa5781081695b56d809ad, ASSIGN 2024-12-16T17:57:02,045 INFO [PEWorker-2 {}] assignment.TransitRegionStateProcedure(264): Starting pid=34, ppid=33, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, locked=true; TransitRegionStateProcedure table=TestAcidGuarantees, region=7212a7dec92fa5781081695b56d809ad, ASSIGN; state=OFFLINE, location=3609ad07831c,39733,1734371789085; forceNewPlan=false, retain=false 2024-12-16T17:57:02,196 INFO [PEWorker-3 {}] assignment.RegionStateStore(202): pid=34 updating hbase:meta row=7212a7dec92fa5781081695b56d809ad, regionState=OPENING, regionLocation=3609ad07831c,39733,1734371789085 2024-12-16T17:57:02,200 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=35, ppid=34, state=RUNNABLE; OpenRegionProcedure 7212a7dec92fa5781081695b56d809ad, server=3609ad07831c,39733,1734371789085}] 2024-12-16T17:57:02,201 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38367 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=33 2024-12-16T17:57:02,355 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 3609ad07831c,39733,1734371789085 2024-12-16T17:57:02,358 INFO [RS_OPEN_REGION-regionserver/3609ad07831c:0-0 {event_type=M_RS_OPEN_REGION, pid=35}] handler.AssignRegionHandler(135): Open TestAcidGuarantees,,1734371821088.7212a7dec92fa5781081695b56d809ad. 
2024-12-16T17:57:02,359 DEBUG [RS_OPEN_REGION-regionserver/3609ad07831c:0-0 {event_type=M_RS_OPEN_REGION, pid=35}] regionserver.HRegion(7285): Opening region: {ENCODED => 7212a7dec92fa5781081695b56d809ad, NAME => 'TestAcidGuarantees,,1734371821088.7212a7dec92fa5781081695b56d809ad.', STARTKEY => '', ENDKEY => ''} 2024-12-16T17:57:02,359 DEBUG [RS_OPEN_REGION-regionserver/3609ad07831c:0-0 {event_type=M_RS_OPEN_REGION, pid=35}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table TestAcidGuarantees 7212a7dec92fa5781081695b56d809ad 2024-12-16T17:57:02,359 DEBUG [RS_OPEN_REGION-regionserver/3609ad07831c:0-0 {event_type=M_RS_OPEN_REGION, pid=35}] regionserver.HRegion(894): Instantiated TestAcidGuarantees,,1734371821088.7212a7dec92fa5781081695b56d809ad.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-16T17:57:02,359 DEBUG [RS_OPEN_REGION-regionserver/3609ad07831c:0-0 {event_type=M_RS_OPEN_REGION, pid=35}] regionserver.HRegion(7327): checking encryption for 7212a7dec92fa5781081695b56d809ad 2024-12-16T17:57:02,359 DEBUG [RS_OPEN_REGION-regionserver/3609ad07831c:0-0 {event_type=M_RS_OPEN_REGION, pid=35}] regionserver.HRegion(7330): checking classloading for 7212a7dec92fa5781081695b56d809ad 2024-12-16T17:57:02,361 INFO [StoreOpener-7212a7dec92fa5781081695b56d809ad-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family A of region 7212a7dec92fa5781081695b56d809ad 2024-12-16T17:57:02,363 INFO [StoreOpener-7212a7dec92fa5781081695b56d809ad-1 {}] regionserver.CompactingMemStore(122): Store=A, in-memory flush size threshold=2.00 MB, immutable segments index type=CHUNK_MAP, compactor=ADAPTIVE, pipelineThreshold=2, compactionCellMax=10 2024-12-16T17:57:02,364 INFO [StoreOpener-7212a7dec92fa5781081695b56d809ad-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 7212a7dec92fa5781081695b56d809ad columnFamilyName A 2024-12-16T17:57:02,364 DEBUG [StoreOpener-7212a7dec92fa5781081695b56d809ad-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-16T17:57:02,365 INFO [StoreOpener-7212a7dec92fa5781081695b56d809ad-1 {}] regionserver.HStore(327): Store=7212a7dec92fa5781081695b56d809ad/A, memstore type=CompactingMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-16T17:57:02,365 INFO [StoreOpener-7212a7dec92fa5781081695b56d809ad-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, 
cacheDataCompressed=false, prefetchOnOpen=false, for column family B of region 7212a7dec92fa5781081695b56d809ad 2024-12-16T17:57:02,367 INFO [StoreOpener-7212a7dec92fa5781081695b56d809ad-1 {}] regionserver.CompactingMemStore(122): Store=B, in-memory flush size threshold=2.00 MB, immutable segments index type=CHUNK_MAP, compactor=ADAPTIVE, pipelineThreshold=2, compactionCellMax=10 2024-12-16T17:57:02,368 INFO [StoreOpener-7212a7dec92fa5781081695b56d809ad-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 7212a7dec92fa5781081695b56d809ad columnFamilyName B 2024-12-16T17:57:02,368 DEBUG [StoreOpener-7212a7dec92fa5781081695b56d809ad-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-16T17:57:02,369 INFO [StoreOpener-7212a7dec92fa5781081695b56d809ad-1 {}] regionserver.HStore(327): Store=7212a7dec92fa5781081695b56d809ad/B, memstore type=CompactingMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-16T17:57:02,369 INFO [StoreOpener-7212a7dec92fa5781081695b56d809ad-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family C of region 7212a7dec92fa5781081695b56d809ad 2024-12-16T17:57:02,371 INFO [StoreOpener-7212a7dec92fa5781081695b56d809ad-1 {}] regionserver.CompactingMemStore(122): Store=C, in-memory flush size threshold=2.00 MB, immutable segments index type=CHUNK_MAP, compactor=ADAPTIVE, pipelineThreshold=2, compactionCellMax=10 2024-12-16T17:57:02,371 INFO [StoreOpener-7212a7dec92fa5781081695b56d809ad-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 7212a7dec92fa5781081695b56d809ad columnFamilyName C 2024-12-16T17:57:02,371 DEBUG [StoreOpener-7212a7dec92fa5781081695b56d809ad-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-16T17:57:02,372 INFO [StoreOpener-7212a7dec92fa5781081695b56d809ad-1 {}] regionserver.HStore(327): Store=7212a7dec92fa5781081695b56d809ad/C, memstore 
type=CompactingMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-16T17:57:02,372 INFO [RS_OPEN_REGION-regionserver/3609ad07831c:0-0 {event_type=M_RS_OPEN_REGION, pid=35}] regionserver.HRegion(1178): Setting FlushNonSloppyStoresFirstPolicy for the region=TestAcidGuarantees,,1734371821088.7212a7dec92fa5781081695b56d809ad. 2024-12-16T17:57:02,373 DEBUG [RS_OPEN_REGION-regionserver/3609ad07831c:0-0 {event_type=M_RS_OPEN_REGION, pid=35}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/7212a7dec92fa5781081695b56d809ad 2024-12-16T17:57:02,373 DEBUG [RS_OPEN_REGION-regionserver/3609ad07831c:0-0 {event_type=M_RS_OPEN_REGION, pid=35}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/7212a7dec92fa5781081695b56d809ad 2024-12-16T17:57:02,375 DEBUG [RS_OPEN_REGION-regionserver/3609ad07831c:0-0 {event_type=M_RS_OPEN_REGION, pid=35}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table TestAcidGuarantees descriptor;using region.getMemStoreFlushHeapSize/# of families (16.0 M)) instead. 2024-12-16T17:57:02,376 DEBUG [RS_OPEN_REGION-regionserver/3609ad07831c:0-0 {event_type=M_RS_OPEN_REGION, pid=35}] regionserver.HRegion(1085): writing seq id for 7212a7dec92fa5781081695b56d809ad 2024-12-16T17:57:02,379 DEBUG [RS_OPEN_REGION-regionserver/3609ad07831c:0-0 {event_type=M_RS_OPEN_REGION, pid=35}] wal.WALSplitUtil(409): Wrote file=hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/7212a7dec92fa5781081695b56d809ad/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-12-16T17:57:02,379 INFO [RS_OPEN_REGION-regionserver/3609ad07831c:0-0 {event_type=M_RS_OPEN_REGION, pid=35}] regionserver.HRegion(1102): Opened 7212a7dec92fa5781081695b56d809ad; next sequenceid=2; ConstantSizeRegionSplitPolicy{desiredMaxFileSize=70393422, jitterRate=0.04894372820854187}, FlushLargeStoresPolicy{flushSizeLowerBound=16777216} 2024-12-16T17:57:02,380 DEBUG [RS_OPEN_REGION-regionserver/3609ad07831c:0-0 {event_type=M_RS_OPEN_REGION, pid=35}] regionserver.HRegion(1001): Region open journal for 7212a7dec92fa5781081695b56d809ad: 2024-12-16T17:57:02,381 INFO [RS_OPEN_REGION-regionserver/3609ad07831c:0-0 {event_type=M_RS_OPEN_REGION, pid=35}] regionserver.HRegionServer(2601): Post open deploy tasks for TestAcidGuarantees,,1734371821088.7212a7dec92fa5781081695b56d809ad., pid=35, masterSystemTime=1734371822354 2024-12-16T17:57:02,382 DEBUG [RS_OPEN_REGION-regionserver/3609ad07831c:0-0 {event_type=M_RS_OPEN_REGION, pid=35}] regionserver.HRegionServer(2628): Finished post open deploy task for TestAcidGuarantees,,1734371821088.7212a7dec92fa5781081695b56d809ad. 2024-12-16T17:57:02,383 INFO [RS_OPEN_REGION-regionserver/3609ad07831c:0-0 {event_type=M_RS_OPEN_REGION, pid=35}] handler.AssignRegionHandler(164): Opened TestAcidGuarantees,,1734371821088.7212a7dec92fa5781081695b56d809ad. 
2024-12-16T17:57:02,383 INFO [PEWorker-5 {}] assignment.RegionStateStore(202): pid=34 updating hbase:meta row=7212a7dec92fa5781081695b56d809ad, regionState=OPEN, openSeqNum=2, regionLocation=3609ad07831c,39733,1734371789085 2024-12-16T17:57:02,385 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=35, resume processing ppid=34 2024-12-16T17:57:02,386 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=35, ppid=34, state=SUCCESS; OpenRegionProcedure 7212a7dec92fa5781081695b56d809ad, server=3609ad07831c,39733,1734371789085 in 184 msec 2024-12-16T17:57:02,387 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=34, resume processing ppid=33 2024-12-16T17:57:02,387 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=34, ppid=33, state=SUCCESS; TransitRegionStateProcedure table=TestAcidGuarantees, region=7212a7dec92fa5781081695b56d809ad, ASSIGN in 344 msec 2024-12-16T17:57:02,387 INFO [PEWorker-2 {}] procedure.CreateTableProcedure(89): pid=33, state=RUNNABLE:CREATE_TABLE_UPDATE_DESC_CACHE, locked=true; CreateTableProcedure table=TestAcidGuarantees execute state=CREATE_TABLE_UPDATE_DESC_CACHE 2024-12-16T17:57:02,388 DEBUG [PEWorker-2 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":1,"row":"TestAcidGuarantees","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1734371822387"}]},"ts":"1734371822387"} 2024-12-16T17:57:02,389 INFO [PEWorker-2 {}] hbase.MetaTableAccessor(1655): Updated tableName=TestAcidGuarantees, state=ENABLED in hbase:meta 2024-12-16T17:57:02,400 INFO [PEWorker-2 {}] procedure.CreateTableProcedure(89): pid=33, state=RUNNABLE:CREATE_TABLE_POST_OPERATION, locked=true; CreateTableProcedure table=TestAcidGuarantees execute state=CREATE_TABLE_POST_OPERATION 2024-12-16T17:57:02,401 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=33, state=SUCCESS; CreateTableProcedure table=TestAcidGuarantees in 1.3120 sec 2024-12-16T17:57:03,204 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38367 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=33 2024-12-16T17:57:03,205 INFO [Time-limited test {}] client.HBaseAdmin$TableFuture(3751): Operation: CREATE, Table Name: default:TestAcidGuarantees, procId: 33 completed 2024-12-16T17:57:03,209 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x232d9608 to 127.0.0.1:49190 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@72234fc9 2024-12-16T17:57:03,251 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@356ad985, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-16T17:57:03,256 DEBUG [Time-limited test {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-16T17:57:03,260 INFO [RS-EventLoopGroup-3-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:41572, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-16T17:57:03,264 DEBUG [Time-limited test {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=MasterService, sasl=false 2024-12-16T17:57:03,266 INFO [RS-EventLoopGroup-1-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:59532, 
version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=MasterService 2024-12-16T17:57:03,272 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38367 {}] util.TableDescriptorChecker(321): MEMSTORE_FLUSHSIZE for table descriptor or "hbase.hregion.memstore.flush.size" (131072) is too small, which might cause very frequent flushing. 2024-12-16T17:57:03,273 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38367 {}] master.HMaster$14(2798): Client=jenkins//172.17.0.2 modify table TestAcidGuarantees from 'TestAcidGuarantees', {TABLE_ATTRIBUTES => {METADATA => {'hbase.hregion.compacting.memstore.type' => 'ADAPTIVE', 'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'A', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'B', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'C', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} to 'TestAcidGuarantees', {TABLE_ATTRIBUTES => {METADATA => {'hbase.hregion.compacting.memstore.type' => 'ADAPTIVE', 'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'A', INDEX_BLOCK_ENCODING => 'NONE', MOB_THRESHOLD => '4', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', IS_MOB => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'B', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'C', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-12-16T17:57:03,278 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38367 {}] procedure2.ProcedureExecutor(1098): Stored pid=36, state=RUNNABLE:MODIFY_TABLE_PREPARE; ModifyTableProcedure table=TestAcidGuarantees 2024-12-16T17:57:03,288 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41817 is added to blk_1073741967_1143 (size=999) 2024-12-16T17:57:03,709 DEBUG [PEWorker-1 {}] util.FSTableDescriptors(519): Deleted hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/.tabledesc/.tableinfo.0000000001.963 2024-12-16T17:57:03,709 INFO [PEWorker-1 {}] util.FSTableDescriptors(297): Updated 
tableinfo=hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/.tabledesc/.tableinfo.0000000002.999 2024-12-16T17:57:03,716 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=37, ppid=36, state=RUNNABLE:REOPEN_TABLE_REGIONS_GET_REGIONS; ReopenTableRegionsProcedure table=TestAcidGuarantees}] 2024-12-16T17:57:03,728 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=38, ppid=37, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE; TransitRegionStateProcedure table=TestAcidGuarantees, region=7212a7dec92fa5781081695b56d809ad, REOPEN/MOVE}] 2024-12-16T17:57:03,729 INFO [PEWorker-5 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=38, ppid=37, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE; TransitRegionStateProcedure table=TestAcidGuarantees, region=7212a7dec92fa5781081695b56d809ad, REOPEN/MOVE 2024-12-16T17:57:03,729 INFO [PEWorker-5 {}] assignment.RegionStateStore(202): pid=38 updating hbase:meta row=7212a7dec92fa5781081695b56d809ad, regionState=CLOSING, regionLocation=3609ad07831c,39733,1734371789085 2024-12-16T17:57:03,731 DEBUG [PEWorker-5 {}] assignment.TransitRegionStateProcedure(338): Close region: isSplit: false: evictOnSplit: true: evictOnClose: false 2024-12-16T17:57:03,731 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=39, ppid=38, state=RUNNABLE; CloseRegionProcedure 7212a7dec92fa5781081695b56d809ad, server=3609ad07831c,39733,1734371789085}] 2024-12-16T17:57:03,882 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 3609ad07831c,39733,1734371789085 2024-12-16T17:57:03,883 INFO [RS_CLOSE_REGION-regionserver/3609ad07831c:0-0 {event_type=M_RS_CLOSE_REGION, pid=39}] handler.UnassignRegionHandler(124): Close 7212a7dec92fa5781081695b56d809ad 2024-12-16T17:57:03,883 DEBUG [RS_CLOSE_REGION-regionserver/3609ad07831c:0-0 {event_type=M_RS_CLOSE_REGION, pid=39}] handler.UnassignRegionHandler(138): Unassign region: split region: false: evictCache: false 2024-12-16T17:57:03,883 DEBUG [RS_CLOSE_REGION-regionserver/3609ad07831c:0-0 {event_type=M_RS_CLOSE_REGION, pid=39}] regionserver.HRegion(1681): Closing 7212a7dec92fa5781081695b56d809ad, disabling compactions & flushes 2024-12-16T17:57:03,883 INFO [RS_CLOSE_REGION-regionserver/3609ad07831c:0-0 {event_type=M_RS_CLOSE_REGION, pid=39}] regionserver.HRegion(1703): Closing region TestAcidGuarantees,,1734371821088.7212a7dec92fa5781081695b56d809ad. 2024-12-16T17:57:03,883 DEBUG [RS_CLOSE_REGION-regionserver/3609ad07831c:0-0 {event_type=M_RS_CLOSE_REGION, pid=39}] regionserver.HRegion(1724): Waiting without time limit for close lock on TestAcidGuarantees,,1734371821088.7212a7dec92fa5781081695b56d809ad. 2024-12-16T17:57:03,883 DEBUG [RS_CLOSE_REGION-regionserver/3609ad07831c:0-0 {event_type=M_RS_CLOSE_REGION, pid=39}] regionserver.HRegion(1791): Acquired close lock on TestAcidGuarantees,,1734371821088.7212a7dec92fa5781081695b56d809ad. after waiting 0 ms 2024-12-16T17:57:03,883 DEBUG [RS_CLOSE_REGION-regionserver/3609ad07831c:0-0 {event_type=M_RS_CLOSE_REGION, pid=39}] regionserver.HRegion(1801): Updates disabled for region TestAcidGuarantees,,1734371821088.7212a7dec92fa5781081695b56d809ad. 
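The ModifyTableProcedure stored as pid=36 changes only family A: it gains IS_MOB => 'true' and MOB_THRESHOLD => '4', so any value larger than four bytes is written as a MOB cell, which is what makes this run the MOB variant of the test. A hedged sketch of an equivalent client-side change is below; it uses modifyColumnFamily on an assumed open Admin handle rather than the full-descriptor modifyTable call seen in the log.

    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.ColumnFamilyDescriptor;
    import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
    import org.apache.hadoop.hbase.util.Bytes;

    public class EnableMobOnFamilyA {
        // Assumes 'admin' was obtained from ConnectionFactory.createConnection(conf).getAdmin().
        static void enableMob(Admin admin) throws Exception {
            TableName table = TableName.valueOf("TestAcidGuarantees");

            // Copy the current definition of family A and switch on MOB with a 4-byte
            // threshold, mirroring IS_MOB => 'true', MOB_THRESHOLD => '4' above.
            ColumnFamilyDescriptor current =
                admin.getDescriptor(table).getColumnFamily(Bytes.toBytes("A"));
            ColumnFamilyDescriptor mobA = ColumnFamilyDescriptorBuilder.newBuilder(current)
                .setMobEnabled(true)
                .setMobThreshold(4L)
                .build();

            // The master applies the change, and the surrounding close/reopen of region
            // 7212a7dec92fa5781081695b56d809ad is how it takes effect on the region server.
            admin.modifyColumnFamily(table, mobA);
        }
    }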
2024-12-16T17:57:03,887 DEBUG [RS_CLOSE_REGION-regionserver/3609ad07831c:0-0 {event_type=M_RS_CLOSE_REGION, pid=39}] wal.WALSplitUtil(409): Wrote file=hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/7212a7dec92fa5781081695b56d809ad/recovered.edits/4.seqid, newMaxSeqId=4, maxSeqId=1 2024-12-16T17:57:03,888 INFO [RS_CLOSE_REGION-regionserver/3609ad07831c:0-0 {event_type=M_RS_CLOSE_REGION, pid=39}] regionserver.HRegion(1922): Closed TestAcidGuarantees,,1734371821088.7212a7dec92fa5781081695b56d809ad. 2024-12-16T17:57:03,888 DEBUG [RS_CLOSE_REGION-regionserver/3609ad07831c:0-0 {event_type=M_RS_CLOSE_REGION, pid=39}] regionserver.HRegion(1635): Region close journal for 7212a7dec92fa5781081695b56d809ad: 2024-12-16T17:57:03,888 WARN [RS_CLOSE_REGION-regionserver/3609ad07831c:0-0 {event_type=M_RS_CLOSE_REGION, pid=39}] regionserver.HRegionServer(3786): Not adding moved region record: 7212a7dec92fa5781081695b56d809ad to self. 2024-12-16T17:57:03,890 INFO [RS_CLOSE_REGION-regionserver/3609ad07831c:0-0 {event_type=M_RS_CLOSE_REGION, pid=39}] handler.UnassignRegionHandler(170): Closed 7212a7dec92fa5781081695b56d809ad 2024-12-16T17:57:03,890 INFO [PEWorker-2 {}] assignment.RegionStateStore(202): pid=38 updating hbase:meta row=7212a7dec92fa5781081695b56d809ad, regionState=CLOSED 2024-12-16T17:57:03,892 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=39, resume processing ppid=38 2024-12-16T17:57:03,893 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=39, ppid=38, state=SUCCESS; CloseRegionProcedure 7212a7dec92fa5781081695b56d809ad, server=3609ad07831c,39733,1734371789085 in 160 msec 2024-12-16T17:57:03,893 INFO [PEWorker-1 {}] assignment.TransitRegionStateProcedure(264): Starting pid=38, ppid=37, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, locked=true; TransitRegionStateProcedure table=TestAcidGuarantees, region=7212a7dec92fa5781081695b56d809ad, REOPEN/MOVE; state=CLOSED, location=3609ad07831c,39733,1734371789085; forceNewPlan=false, retain=true 2024-12-16T17:57:04,044 INFO [PEWorker-3 {}] assignment.RegionStateStore(202): pid=38 updating hbase:meta row=7212a7dec92fa5781081695b56d809ad, regionState=OPENING, regionLocation=3609ad07831c,39733,1734371789085 2024-12-16T17:57:04,047 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=40, ppid=38, state=RUNNABLE; OpenRegionProcedure 7212a7dec92fa5781081695b56d809ad, server=3609ad07831c,39733,1734371789085}] 2024-12-16T17:57:04,200 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 3609ad07831c,39733,1734371789085 2024-12-16T17:57:04,207 INFO [RS_OPEN_REGION-regionserver/3609ad07831c:0-0 {event_type=M_RS_OPEN_REGION, pid=40}] handler.AssignRegionHandler(135): Open TestAcidGuarantees,,1734371821088.7212a7dec92fa5781081695b56d809ad. 
2024-12-16T17:57:04,207 DEBUG [RS_OPEN_REGION-regionserver/3609ad07831c:0-0 {event_type=M_RS_OPEN_REGION, pid=40}] regionserver.HRegion(7285): Opening region: {ENCODED => 7212a7dec92fa5781081695b56d809ad, NAME => 'TestAcidGuarantees,,1734371821088.7212a7dec92fa5781081695b56d809ad.', STARTKEY => '', ENDKEY => ''} 2024-12-16T17:57:04,208 DEBUG [RS_OPEN_REGION-regionserver/3609ad07831c:0-0 {event_type=M_RS_OPEN_REGION, pid=40}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table TestAcidGuarantees 7212a7dec92fa5781081695b56d809ad 2024-12-16T17:57:04,208 DEBUG [RS_OPEN_REGION-regionserver/3609ad07831c:0-0 {event_type=M_RS_OPEN_REGION, pid=40}] regionserver.HRegion(894): Instantiated TestAcidGuarantees,,1734371821088.7212a7dec92fa5781081695b56d809ad.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-16T17:57:04,209 DEBUG [RS_OPEN_REGION-regionserver/3609ad07831c:0-0 {event_type=M_RS_OPEN_REGION, pid=40}] regionserver.HRegion(7327): checking encryption for 7212a7dec92fa5781081695b56d809ad 2024-12-16T17:57:04,209 DEBUG [RS_OPEN_REGION-regionserver/3609ad07831c:0-0 {event_type=M_RS_OPEN_REGION, pid=40}] regionserver.HRegion(7330): checking classloading for 7212a7dec92fa5781081695b56d809ad 2024-12-16T17:57:04,213 INFO [StoreOpener-7212a7dec92fa5781081695b56d809ad-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family A of region 7212a7dec92fa5781081695b56d809ad 2024-12-16T17:57:04,214 INFO [StoreOpener-7212a7dec92fa5781081695b56d809ad-1 {}] regionserver.CompactingMemStore(122): Store=A, in-memory flush size threshold=2.00 MB, immutable segments index type=CHUNK_MAP, compactor=ADAPTIVE, pipelineThreshold=2, compactionCellMax=10 2024-12-16T17:57:04,220 INFO [StoreOpener-7212a7dec92fa5781081695b56d809ad-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 7212a7dec92fa5781081695b56d809ad columnFamilyName A 2024-12-16T17:57:04,223 DEBUG [StoreOpener-7212a7dec92fa5781081695b56d809ad-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-16T17:57:04,223 INFO [StoreOpener-7212a7dec92fa5781081695b56d809ad-1 {}] regionserver.HStore(327): Store=7212a7dec92fa5781081695b56d809ad/A, memstore type=CompactingMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-16T17:57:04,224 INFO [StoreOpener-7212a7dec92fa5781081695b56d809ad-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, 
cacheDataCompressed=false, prefetchOnOpen=false, for column family B of region 7212a7dec92fa5781081695b56d809ad 2024-12-16T17:57:04,225 INFO [StoreOpener-7212a7dec92fa5781081695b56d809ad-1 {}] regionserver.CompactingMemStore(122): Store=B, in-memory flush size threshold=2.00 MB, immutable segments index type=CHUNK_MAP, compactor=ADAPTIVE, pipelineThreshold=2, compactionCellMax=10 2024-12-16T17:57:04,225 INFO [StoreOpener-7212a7dec92fa5781081695b56d809ad-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 7212a7dec92fa5781081695b56d809ad columnFamilyName B 2024-12-16T17:57:04,225 DEBUG [StoreOpener-7212a7dec92fa5781081695b56d809ad-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-16T17:57:04,225 INFO [StoreOpener-7212a7dec92fa5781081695b56d809ad-1 {}] regionserver.HStore(327): Store=7212a7dec92fa5781081695b56d809ad/B, memstore type=CompactingMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-16T17:57:04,225 INFO [StoreOpener-7212a7dec92fa5781081695b56d809ad-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family C of region 7212a7dec92fa5781081695b56d809ad 2024-12-16T17:57:04,226 INFO [StoreOpener-7212a7dec92fa5781081695b56d809ad-1 {}] regionserver.CompactingMemStore(122): Store=C, in-memory flush size threshold=2.00 MB, immutable segments index type=CHUNK_MAP, compactor=ADAPTIVE, pipelineThreshold=2, compactionCellMax=10 2024-12-16T17:57:04,226 INFO [StoreOpener-7212a7dec92fa5781081695b56d809ad-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 7212a7dec92fa5781081695b56d809ad columnFamilyName C 2024-12-16T17:57:04,226 DEBUG [StoreOpener-7212a7dec92fa5781081695b56d809ad-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-16T17:57:04,226 INFO [StoreOpener-7212a7dec92fa5781081695b56d809ad-1 {}] regionserver.HStore(327): Store=7212a7dec92fa5781081695b56d809ad/C, memstore 
type=CompactingMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-16T17:57:04,227 INFO [RS_OPEN_REGION-regionserver/3609ad07831c:0-0 {event_type=M_RS_OPEN_REGION, pid=40}] regionserver.HRegion(1178): Setting FlushNonSloppyStoresFirstPolicy for the region=TestAcidGuarantees,,1734371821088.7212a7dec92fa5781081695b56d809ad. 2024-12-16T17:57:04,227 DEBUG [RS_OPEN_REGION-regionserver/3609ad07831c:0-0 {event_type=M_RS_OPEN_REGION, pid=40}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/7212a7dec92fa5781081695b56d809ad 2024-12-16T17:57:04,228 DEBUG [RS_OPEN_REGION-regionserver/3609ad07831c:0-0 {event_type=M_RS_OPEN_REGION, pid=40}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/7212a7dec92fa5781081695b56d809ad 2024-12-16T17:57:04,229 DEBUG [RS_OPEN_REGION-regionserver/3609ad07831c:0-0 {event_type=M_RS_OPEN_REGION, pid=40}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table TestAcidGuarantees descriptor;using region.getMemStoreFlushHeapSize/# of families (16.0 M)) instead. 2024-12-16T17:57:04,230 DEBUG [RS_OPEN_REGION-regionserver/3609ad07831c:0-0 {event_type=M_RS_OPEN_REGION, pid=40}] regionserver.HRegion(1085): writing seq id for 7212a7dec92fa5781081695b56d809ad 2024-12-16T17:57:04,231 INFO [RS_OPEN_REGION-regionserver/3609ad07831c:0-0 {event_type=M_RS_OPEN_REGION, pid=40}] regionserver.HRegion(1102): Opened 7212a7dec92fa5781081695b56d809ad; next sequenceid=5; ConstantSizeRegionSplitPolicy{desiredMaxFileSize=62261383, jitterRate=-0.07223309576511383}, FlushLargeStoresPolicy{flushSizeLowerBound=16777216} 2024-12-16T17:57:04,232 DEBUG [RS_OPEN_REGION-regionserver/3609ad07831c:0-0 {event_type=M_RS_OPEN_REGION, pid=40}] regionserver.HRegion(1001): Region open journal for 7212a7dec92fa5781081695b56d809ad: 2024-12-16T17:57:04,232 INFO [RS_OPEN_REGION-regionserver/3609ad07831c:0-0 {event_type=M_RS_OPEN_REGION, pid=40}] regionserver.HRegionServer(2601): Post open deploy tasks for TestAcidGuarantees,,1734371821088.7212a7dec92fa5781081695b56d809ad., pid=40, masterSystemTime=1734371824199 2024-12-16T17:57:04,234 DEBUG [RS_OPEN_REGION-regionserver/3609ad07831c:0-0 {event_type=M_RS_OPEN_REGION, pid=40}] regionserver.HRegionServer(2628): Finished post open deploy task for TestAcidGuarantees,,1734371821088.7212a7dec92fa5781081695b56d809ad. 2024-12-16T17:57:04,234 INFO [RS_OPEN_REGION-regionserver/3609ad07831c:0-0 {event_type=M_RS_OPEN_REGION, pid=40}] handler.AssignRegionHandler(164): Opened TestAcidGuarantees,,1734371821088.7212a7dec92fa5781081695b56d809ad. 
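After the region comes back up with the new descriptor (openSeqNum=5 below), clients observe the MOB settings immediately. A small, assumed sanity check against the same Admin handle might read:

    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.ColumnFamilyDescriptor;
    import org.apache.hadoop.hbase.util.Bytes;

    public class VerifyMobSettings {
        static void check(Admin admin) throws Exception {
            ColumnFamilyDescriptor a = admin
                .getDescriptor(TableName.valueOf("TestAcidGuarantees"))
                .getColumnFamily(Bytes.toBytes("A"));

            // Expect isMobEnabled=true and mobThreshold=4 after the modify above.
            System.out.println("A: isMobEnabled=" + a.isMobEnabled()
                + ", mobThreshold=" + a.getMobThreshold());
        }
    }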
2024-12-16T17:57:04,234 INFO [PEWorker-5 {}] assignment.RegionStateStore(202): pid=38 updating hbase:meta row=7212a7dec92fa5781081695b56d809ad, regionState=OPEN, openSeqNum=5, regionLocation=3609ad07831c,39733,1734371789085 2024-12-16T17:57:04,236 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=40, resume processing ppid=38 2024-12-16T17:57:04,236 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=40, ppid=38, state=SUCCESS; OpenRegionProcedure 7212a7dec92fa5781081695b56d809ad, server=3609ad07831c,39733,1734371789085 in 188 msec 2024-12-16T17:57:04,237 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=38, resume processing ppid=37 2024-12-16T17:57:04,237 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=38, ppid=37, state=SUCCESS; TransitRegionStateProcedure table=TestAcidGuarantees, region=7212a7dec92fa5781081695b56d809ad, REOPEN/MOVE in 508 msec 2024-12-16T17:57:04,240 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=37, resume processing ppid=36 2024-12-16T17:57:04,240 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=37, ppid=36, state=SUCCESS; ReopenTableRegionsProcedure table=TestAcidGuarantees in 523 msec 2024-12-16T17:57:04,242 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=36, state=SUCCESS; ModifyTableProcedure table=TestAcidGuarantees in 966 msec 2024-12-16T17:57:04,244 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38367 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=36 2024-12-16T17:57:04,250 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x7dd223a4 to 127.0.0.1:49190 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@675abe01 2024-12-16T17:57:04,302 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@113ec455, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-16T17:57:04,304 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x590870d6 to 127.0.0.1:49190 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@4cbad329 2024-12-16T17:57:04,317 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@2430641a, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-16T17:57:04,319 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x2058489b to 127.0.0.1:49190 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@50e0ac06 2024-12-16T17:57:04,332 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@1ad743e8, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-16T17:57:04,335 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x75e5b8c2 to 
127.0.0.1:49190 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@203c6f7d 2024-12-16T17:57:04,349 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@58bc25a, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-16T17:57:04,351 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x582ce1c8 to 127.0.0.1:49190 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@717bc61 2024-12-16T17:57:04,359 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@5c5527cb, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-16T17:57:04,360 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x67a7fd85 to 127.0.0.1:49190 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@1239f5c5 2024-12-16T17:57:04,366 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@51403431, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-16T17:57:04,368 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x191da8cb to 127.0.0.1:49190 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@4457f1f0 2024-12-16T17:57:04,375 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@1466a4f7, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-16T17:57:04,376 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x50021c01 to 127.0.0.1:49190 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@2b21af80 2024-12-16T17:57:04,383 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@ac1f70f, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-16T17:57:04,385 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x031a73c1 to 127.0.0.1:49190 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@2561918d 2024-12-16T17:57:04,391 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@209d73a1, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-16T17:57:04,395 INFO 
[RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38367 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-12-16T17:57:04,396 DEBUG [hconnection-0x200f5271-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-16T17:57:04,396 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38367 {}] procedure2.ProcedureExecutor(1098): Stored pid=41, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=41, table=TestAcidGuarantees 2024-12-16T17:57:04,397 INFO [PEWorker-4 {}] procedure.FlushTableProcedure(91): pid=41, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=41, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-12-16T17:57:04,397 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38367 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=41 2024-12-16T17:57:04,398 INFO [PEWorker-4 {}] procedure.FlushTableProcedure(91): pid=41, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=41, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-12-16T17:57:04,398 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=42, ppid=41, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-12-16T17:57:04,399 DEBUG [hconnection-0xb722434-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-16T17:57:04,399 DEBUG [hconnection-0x72fcf40-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-16T17:57:04,399 DEBUG [hconnection-0x6f22c5-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-16T17:57:04,399 DEBUG [hconnection-0x22db0e45-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-16T17:57:04,401 INFO [RS-EventLoopGroup-3-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:41586, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-16T17:57:04,401 INFO [RS-EventLoopGroup-3-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:41590, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-16T17:57:04,401 DEBUG [hconnection-0x7e456dba-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-16T17:57:04,401 INFO [RS-EventLoopGroup-3-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:41588, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-16T17:57:04,401 INFO [RS-EventLoopGroup-3-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:41612, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-16T17:57:04,401 INFO [RS-EventLoopGroup-3-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:41628, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-16T17:57:04,401 DEBUG [hconnection-0x4856fb5a-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for 
service=ClientService, sasl=false 2024-12-16T17:57:04,402 INFO [RS-EventLoopGroup-3-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:41634, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-16T17:57:04,402 DEBUG [hconnection-0x2ea9014a-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-16T17:57:04,402 INFO [RS-EventLoopGroup-3-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:41642, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-16T17:57:04,403 INFO [RS-EventLoopGroup-3-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:41652, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-16T17:57:04,404 DEBUG [hconnection-0x481990e8-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-16T17:57:04,405 INFO [RS-EventLoopGroup-3-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:41656, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-16T17:57:04,415 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39733 {}] regionserver.HRegion(8581): Flush requested on 7212a7dec92fa5781081695b56d809ad 2024-12-16T17:57:04,415 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 7212a7dec92fa5781081695b56d809ad 3/3 column families, dataSize=53.67 KB heapSize=141.38 KB 2024-12-16T17:57:04,416 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 7212a7dec92fa5781081695b56d809ad, store=A 2024-12-16T17:57:04,416 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-16T17:57:04,416 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 7212a7dec92fa5781081695b56d809ad, store=B 2024-12-16T17:57:04,416 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-16T17:57:04,416 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 7212a7dec92fa5781081695b56d809ad, store=C 2024-12-16T17:57:04,416 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-16T17:57:04,454 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39733 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7212a7dec92fa5781081695b56d809ad, server=3609ad07831c,39733,1734371789085 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-16T17:57:04,454 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39733 {}] ipc.CallRunner(138): callId: 7 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41642 deadline: 1734371884449, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7212a7dec92fa5781081695b56d809ad, server=3609ad07831c,39733,1734371789085 2024-12-16T17:57:04,455 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39733 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7212a7dec92fa5781081695b56d809ad, server=3609ad07831c,39733,1734371789085 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-16T17:57:04,455 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39733 {}] ipc.CallRunner(138): callId: 9 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41628 deadline: 1734371884451, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7212a7dec92fa5781081695b56d809ad, server=3609ad07831c,39733,1734371789085 2024-12-16T17:57:04,456 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39733 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7212a7dec92fa5781081695b56d809ad, server=3609ad07831c,39733,1734371789085 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-16T17:57:04,456 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39733 {}] ipc.CallRunner(138): callId: 8 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41586 deadline: 1734371884453, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7212a7dec92fa5781081695b56d809ad, server=3609ad07831c,39733,1734371789085 2024-12-16T17:57:04,458 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39733 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7212a7dec92fa5781081695b56d809ad, server=3609ad07831c,39733,1734371789085 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-16T17:57:04,458 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39733 {}] ipc.CallRunner(138): callId: 8 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41652 deadline: 1734371884454, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7212a7dec92fa5781081695b56d809ad, server=3609ad07831c,39733,1734371789085 2024-12-16T17:57:04,458 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39733 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7212a7dec92fa5781081695b56d809ad, server=3609ad07831c,39733,1734371789085 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-16T17:57:04,459 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39733 {}] ipc.CallRunner(138): callId: 9 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41612 deadline: 1734371884455, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7212a7dec92fa5781081695b56d809ad, server=3609ad07831c,39733,1734371789085 2024-12-16T17:57:04,479 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e2024121690afff550bc5458d9f70c8e32d0c0e39_7212a7dec92fa5781081695b56d809ad is 50, key is test_row_0/A:col10/1734371824411/Put/seqid=0 2024-12-16T17:57:04,498 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38367 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=41 2024-12-16T17:57:04,502 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41817 is added to blk_1073741968_1144 (size=12154) 2024-12-16T17:57:04,505 DEBUG [MemStoreFlusher.0 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-16T17:57:04,510 INFO [MemStoreFlusher.0 {}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e2024121690afff550bc5458d9f70c8e32d0c0e39_7212a7dec92fa5781081695b56d809ad to hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e2024121690afff550bc5458d9f70c8e32d0c0e39_7212a7dec92fa5781081695b56d809ad 2024-12-16T17:57:04,512 DEBUG [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/7212a7dec92fa5781081695b56d809ad/.tmp/A/e7a2c69b873d41a8a8ecdd9801ba0d17, store: [table=TestAcidGuarantees 
family=A region=7212a7dec92fa5781081695b56d809ad] 2024-12-16T17:57:04,520 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/7212a7dec92fa5781081695b56d809ad/.tmp/A/e7a2c69b873d41a8a8ecdd9801ba0d17 is 175, key is test_row_0/A:col10/1734371824411/Put/seqid=0 2024-12-16T17:57:04,533 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41817 is added to blk_1073741969_1145 (size=30955) 2024-12-16T17:57:04,534 INFO [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=15, memsize=17.9 K, hasBloomFilter=true, into tmp file hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/7212a7dec92fa5781081695b56d809ad/.tmp/A/e7a2c69b873d41a8a8ecdd9801ba0d17 2024-12-16T17:57:04,549 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 3609ad07831c,39733,1734371789085 2024-12-16T17:57:04,552 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=39733 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=42 2024-12-16T17:57:04,552 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-0 {event_type=RS_FLUSH_REGIONS, pid=42}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1734371821088.7212a7dec92fa5781081695b56d809ad. 2024-12-16T17:57:04,552 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-0 {event_type=RS_FLUSH_REGIONS, pid=42}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1734371821088.7212a7dec92fa5781081695b56d809ad. as already flushing 2024-12-16T17:57:04,552 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-0 {event_type=RS_FLUSH_REGIONS, pid=42}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1734371821088.7212a7dec92fa5781081695b56d809ad. 2024-12-16T17:57:04,552 ERROR [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-0 {event_type=RS_FLUSH_REGIONS, pid=42}] handler.RSProcedureHandler(58): pid=42 java.io.IOException: Unable to complete flush {ENCODED => 7212a7dec92fa5781081695b56d809ad, NAME => 'TestAcidGuarantees,,1734371821088.7212a7dec92fa5781081695b56d809ad.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
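The entries above record the client-side setup (ReadOnlyZKClient connections to 127.0.0.1:49190, one RPC client per worker), the master accepting the "flush TestAcidGuarantees" request as FlushTableProcedure pid=41, and the region server pushing back on writers with RegionTooBusyException once the memstore of region 7212a7dec92fa5781081695b56d809ad crosses its blocking limit. As a rough, illustrative sketch only (not part of this log), the same interaction seen from a client could look like the code below; it assumes the standard HBase 2.x client API, the quorum address and row/family/qualifier are taken from the log, and the class name and cell value are made up.

import java.io.IOException;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.util.Bytes;

public class FlushSketch {
  public static void main(String[] args) throws IOException {
    // Quorum taken from the ReadOnlyZKClient "Connect ... to 127.0.0.1:49190" entries above.
    Configuration conf = HBaseConfiguration.create();
    conf.set("hbase.zookeeper.quorum", "127.0.0.1");
    conf.setInt("hbase.zookeeper.property.clientPort", 49190);

    TableName tn = TableName.valueOf("TestAcidGuarantees");
    try (Connection conn = ConnectionFactory.createConnection(conf);
         Table table = conn.getTable(tn);
         Admin admin = conn.getAdmin()) {

      // Row, family and qualifier mirror the "test_row_0/A:col10" keys in the flush
      // entries above; the value is a placeholder.
      Put put = new Put(Bytes.toBytes("test_row_0"));
      put.addColumn(Bytes.toBytes("A"), Bytes.toBytes("col10"), Bytes.toBytes("value"));
      try {
        table.put(put);
      } catch (IOException e) {
        // While the region is over its blocking memstore limit, the server answers each
        // Mutate with RegionTooBusyException (the WARN/DEBUG pairs above). The retrying
        // client backs off and retries; only after retries are exhausted does the failure
        // reach this catch block, typically with RegionTooBusyException as the cause.
      }

      // Same request the master logs above as "Client=... flush TestAcidGuarantees",
      // which it runs as FlushTableProcedure pid=41 with a FlushRegionProcedure child.
      admin.flush(tn);
    }
  }
}

Under default settings, how long the client keeps retrying around that catch block is governed by hbase.client.retries.number and hbase.client.pause.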
2024-12-16T17:57:04,553 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-0 {event_type=RS_FLUSH_REGIONS, pid=42}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=42 java.io.IOException: Unable to complete flush {ENCODED => 7212a7dec92fa5781081695b56d809ad, NAME => 'TestAcidGuarantees,,1734371821088.7212a7dec92fa5781081695b56d809ad.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-16T17:57:04,555 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38367 {}] master.HMaster(4114): Remote procedure failed, pid=42 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 7212a7dec92fa5781081695b56d809ad, NAME => 'TestAcidGuarantees,,1734371821088.7212a7dec92fa5781081695b56d809ad.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 7212a7dec92fa5781081695b56d809ad, NAME => 'TestAcidGuarantees,,1734371821088.7212a7dec92fa5781081695b56d809ad.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-16T17:57:04,557 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39733 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7212a7dec92fa5781081695b56d809ad, server=3609ad07831c,39733,1734371789085 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-16T17:57:04,558 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39733 {}] ipc.CallRunner(138): callId: 11 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41628 deadline: 1734371884557, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7212a7dec92fa5781081695b56d809ad, server=3609ad07831c,39733,1734371789085 2024-12-16T17:57:04,558 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39733 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7212a7dec92fa5781081695b56d809ad, server=3609ad07831c,39733,1734371789085 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-16T17:57:04,558 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39733 {}] ipc.CallRunner(138): callId: 10 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41586 deadline: 1734371884557, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7212a7dec92fa5781081695b56d809ad, server=3609ad07831c,39733,1734371789085 2024-12-16T17:57:04,558 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39733 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7212a7dec92fa5781081695b56d809ad, server=3609ad07831c,39733,1734371789085 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-16T17:57:04,558 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39733 {}] ipc.CallRunner(138): callId: 9 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41642 deadline: 1734371884557, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7212a7dec92fa5781081695b56d809ad, server=3609ad07831c,39733,1734371789085 2024-12-16T17:57:04,559 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39733 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7212a7dec92fa5781081695b56d809ad, server=3609ad07831c,39733,1734371789085 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-16T17:57:04,560 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39733 {}] ipc.CallRunner(138): callId: 10 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41652 deadline: 1734371884559, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7212a7dec92fa5781081695b56d809ad, server=3609ad07831c,39733,1734371789085 2024-12-16T17:57:04,560 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39733 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7212a7dec92fa5781081695b56d809ad, server=3609ad07831c,39733,1734371789085 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-16T17:57:04,560 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39733 {}] ipc.CallRunner(138): callId: 11 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41612 deadline: 1734371884560, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7212a7dec92fa5781081695b56d809ad, server=3609ad07831c,39733,1734371789085 2024-12-16T17:57:04,562 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/7212a7dec92fa5781081695b56d809ad/.tmp/B/cde3158a9b194369a7a97a3584e33c0d is 50, key is test_row_0/B:col10/1734371824411/Put/seqid=0 2024-12-16T17:57:04,586 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41817 is added to blk_1073741970_1146 (size=12001) 2024-12-16T17:57:04,699 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38367 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=41 2024-12-16T17:57:04,707 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 3609ad07831c,39733,1734371789085 2024-12-16T17:57:04,707 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=39733 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=42 2024-12-16T17:57:04,707 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-1 {event_type=RS_FLUSH_REGIONS, pid=42}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1734371821088.7212a7dec92fa5781081695b56d809ad. 2024-12-16T17:57:04,707 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-1 {event_type=RS_FLUSH_REGIONS, pid=42}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1734371821088.7212a7dec92fa5781081695b56d809ad. as already flushing 2024-12-16T17:57:04,707 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-1 {event_type=RS_FLUSH_REGIONS, pid=42}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1734371821088.7212a7dec92fa5781081695b56d809ad. 2024-12-16T17:57:04,707 ERROR [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-1 {event_type=RS_FLUSH_REGIONS, pid=42}] handler.RSProcedureHandler(58): pid=42 java.io.IOException: Unable to complete flush {ENCODED => 7212a7dec92fa5781081695b56d809ad, NAME => 'TestAcidGuarantees,,1734371821088.7212a7dec92fa5781081695b56d809ad.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-16T17:57:04,708 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-1 {event_type=RS_FLUSH_REGIONS, pid=42}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=42 java.io.IOException: Unable to complete flush {ENCODED => 7212a7dec92fa5781081695b56d809ad, NAME => 'TestAcidGuarantees,,1734371821088.7212a7dec92fa5781081695b56d809ad.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-16T17:57:04,708 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38367 {}] master.HMaster(4114): Remote procedure failed, pid=42 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 7212a7dec92fa5781081695b56d809ad, NAME => 'TestAcidGuarantees,,1734371821088.7212a7dec92fa5781081695b56d809ad.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 7212a7dec92fa5781081695b56d809ad, NAME => 'TestAcidGuarantees,,1734371821088.7212a7dec92fa5781081695b56d809ad.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-16T17:57:04,760 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39733 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7212a7dec92fa5781081695b56d809ad, server=3609ad07831c,39733,1734371789085 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-16T17:57:04,760 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39733 {}] ipc.CallRunner(138): callId: 11 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41642 deadline: 1734371884760, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7212a7dec92fa5781081695b56d809ad, server=3609ad07831c,39733,1734371789085 2024-12-16T17:57:04,760 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39733 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7212a7dec92fa5781081695b56d809ad, server=3609ad07831c,39733,1734371789085 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-16T17:57:04,761 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39733 {}] ipc.CallRunner(138): callId: 13 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41628 deadline: 1734371884760, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7212a7dec92fa5781081695b56d809ad, server=3609ad07831c,39733,1734371789085 2024-12-16T17:57:04,761 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39733 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7212a7dec92fa5781081695b56d809ad, server=3609ad07831c,39733,1734371789085 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-16T17:57:04,761 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39733 {}] ipc.CallRunner(138): callId: 12 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41586 deadline: 1734371884760, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7212a7dec92fa5781081695b56d809ad, server=3609ad07831c,39733,1734371789085 2024-12-16T17:57:04,762 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39733 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7212a7dec92fa5781081695b56d809ad, server=3609ad07831c,39733,1734371789085 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-16T17:57:04,762 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39733 {}] ipc.CallRunner(138): callId: 12 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41652 deadline: 1734371884762, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7212a7dec92fa5781081695b56d809ad, server=3609ad07831c,39733,1734371789085 2024-12-16T17:57:04,762 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39733 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7212a7dec92fa5781081695b56d809ad, server=3609ad07831c,39733,1734371789085 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-16T17:57:04,763 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39733 {}] ipc.CallRunner(138): callId: 13 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41612 deadline: 1734371884762, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7212a7dec92fa5781081695b56d809ad, server=3609ad07831c,39733,1734371789085 2024-12-16T17:57:04,860 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 3609ad07831c,39733,1734371789085 2024-12-16T17:57:04,860 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=39733 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=42 2024-12-16T17:57:04,860 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-2 {event_type=RS_FLUSH_REGIONS, pid=42}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1734371821088.7212a7dec92fa5781081695b56d809ad. 2024-12-16T17:57:04,861 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-2 {event_type=RS_FLUSH_REGIONS, pid=42}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1734371821088.7212a7dec92fa5781081695b56d809ad. as already flushing 2024-12-16T17:57:04,861 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-2 {event_type=RS_FLUSH_REGIONS, pid=42}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1734371821088.7212a7dec92fa5781081695b56d809ad. 2024-12-16T17:57:04,861 ERROR [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-2 {event_type=RS_FLUSH_REGIONS, pid=42}] handler.RSProcedureHandler(58): pid=42 java.io.IOException: Unable to complete flush {ENCODED => 7212a7dec92fa5781081695b56d809ad, NAME => 'TestAcidGuarantees,,1734371821088.7212a7dec92fa5781081695b56d809ad.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-16T17:57:04,861 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-2 {event_type=RS_FLUSH_REGIONS, pid=42}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=42 java.io.IOException: Unable to complete flush {ENCODED => 7212a7dec92fa5781081695b56d809ad, NAME => 'TestAcidGuarantees,,1734371821088.7212a7dec92fa5781081695b56d809ad.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-16T17:57:04,862 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38367 {}] master.HMaster(4114): Remote procedure failed, pid=42 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 7212a7dec92fa5781081695b56d809ad, NAME => 'TestAcidGuarantees,,1734371821088.7212a7dec92fa5781081695b56d809ad.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 7212a7dec92fa5781081695b56d809ad, NAME => 'TestAcidGuarantees,,1734371821088.7212a7dec92fa5781081695b56d809ad.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-16T17:57:04,987 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=17.89 KB at sequenceid=15 (bloomFilter=true), to=hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/7212a7dec92fa5781081695b56d809ad/.tmp/B/cde3158a9b194369a7a97a3584e33c0d 2024-12-16T17:57:05,000 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38367 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=41 2024-12-16T17:57:05,013 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 3609ad07831c,39733,1734371789085 2024-12-16T17:57:05,014 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=39733 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=42 2024-12-16T17:57:05,014 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-0 {event_type=RS_FLUSH_REGIONS, pid=42}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1734371821088.7212a7dec92fa5781081695b56d809ad. 2024-12-16T17:57:05,014 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-0 {event_type=RS_FLUSH_REGIONS, pid=42}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1734371821088.7212a7dec92fa5781081695b56d809ad. as already flushing 2024-12-16T17:57:05,014 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-0 {event_type=RS_FLUSH_REGIONS, pid=42}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1734371821088.7212a7dec92fa5781081695b56d809ad. 
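While the flush is in flight, each FlushRegionCallable dispatched for pid=42 bails out with "Unable to complete flush ... as already flushing", and MemStoreFlusher keeps writing the A, B and C stores (the mobdir rename and the 17.89 KB flush of family B above). The blocking threshold behind the repeated "Over memstore limit=512.0 K" rejections is the region's memstore flush size multiplied by hbase.hregion.memstore.block.multiplier; the exact values this test configures are not visible in this log. Purely as an illustration of the table features these entries imply (MOB-backed families and a CompactingMemStore), a hedged sketch using the HBase 2.x descriptor builders follows; the table and family names come from the log, while the MOB threshold and the BASIC compaction policy are placeholders rather than the test's real settings.

import java.io.IOException;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.MemoryCompactionPolicy;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptor;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
import org.apache.hadoop.hbase.util.Bytes;

public class MobTableSketch {
  public static void main(String[] args) throws IOException {
    // Quorum settings as in the earlier sketch would be applied here.
    Configuration conf = HBaseConfiguration.create();

    TableDescriptorBuilder tableBuilder =
        TableDescriptorBuilder.newBuilder(TableName.valueOf("TestAcidGuarantees"));
    for (String family : new String[] { "A", "B", "C" }) {
      ColumnFamilyDescriptor cf = ColumnFamilyDescriptorBuilder
          .newBuilder(Bytes.toBytes(family))
          // MOB storage: values above the threshold go to the mobdir files seen in the
          // HMobStore/DefaultMobStoreFlusher entries. The threshold is a placeholder.
          .setMobEnabled(true)
          .setMobThreshold(1024L)
          // A CompactingMemStore ("Swapping pipeline suffix" above) is selected via the
          // in-memory compaction policy; BASIC here is a placeholder choice.
          .setInMemoryCompaction(MemoryCompactionPolicy.BASIC)
          .build();
      tableBuilder.setColumnFamily(cf);
    }

    try (Connection conn = ConnectionFactory.createConnection(conf);
         Admin admin = conn.getAdmin()) {
      admin.createTable(tableBuilder.build());
    }
  }
}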
2024-12-16T17:57:05,014 ERROR [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-0 {event_type=RS_FLUSH_REGIONS, pid=42}] handler.RSProcedureHandler(58): pid=42 java.io.IOException: Unable to complete flush {ENCODED => 7212a7dec92fa5781081695b56d809ad, NAME => 'TestAcidGuarantees,,1734371821088.7212a7dec92fa5781081695b56d809ad.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-16T17:57:05,015 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-0 {event_type=RS_FLUSH_REGIONS, pid=42}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=42 java.io.IOException: Unable to complete flush {ENCODED => 7212a7dec92fa5781081695b56d809ad, NAME => 'TestAcidGuarantees,,1734371821088.7212a7dec92fa5781081695b56d809ad.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-16T17:57:05,015 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38367 {}] master.HMaster(4114): Remote procedure failed, pid=42 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 7212a7dec92fa5781081695b56d809ad, NAME => 'TestAcidGuarantees,,1734371821088.7212a7dec92fa5781081695b56d809ad.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 7212a7dec92fa5781081695b56d809ad, NAME => 'TestAcidGuarantees,,1734371821088.7212a7dec92fa5781081695b56d809ad.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-16T17:57:05,029 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/7212a7dec92fa5781081695b56d809ad/.tmp/C/d931f50d815a4247a6a3494b887c3b1b is 50, key is test_row_0/C:col10/1734371824411/Put/seqid=0 2024-12-16T17:57:05,064 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39733 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7212a7dec92fa5781081695b56d809ad, server=3609ad07831c,39733,1734371789085 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-16T17:57:05,065 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39733 {}] ipc.CallRunner(138): callId: 15 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41628 deadline: 1734371885063, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7212a7dec92fa5781081695b56d809ad, server=3609ad07831c,39733,1734371789085 2024-12-16T17:57:05,065 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39733 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7212a7dec92fa5781081695b56d809ad, server=3609ad07831c,39733,1734371789085 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-16T17:57:05,065 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39733 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7212a7dec92fa5781081695b56d809ad, server=3609ad07831c,39733,1734371789085 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-16T17:57:05,066 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39733 {}] ipc.CallRunner(138): callId: 14 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41586 deadline: 1734371885064, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7212a7dec92fa5781081695b56d809ad, server=3609ad07831c,39733,1734371789085 2024-12-16T17:57:05,066 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39733 {}] ipc.CallRunner(138): callId: 13 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41642 deadline: 1734371885063, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7212a7dec92fa5781081695b56d809ad, server=3609ad07831c,39733,1734371789085 2024-12-16T17:57:05,066 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39733 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7212a7dec92fa5781081695b56d809ad, server=3609ad07831c,39733,1734371789085 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-16T17:57:05,066 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39733 {}] ipc.CallRunner(138): callId: 15 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41612 deadline: 1734371885065, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7212a7dec92fa5781081695b56d809ad, server=3609ad07831c,39733,1734371789085 2024-12-16T17:57:05,066 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39733 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7212a7dec92fa5781081695b56d809ad, server=3609ad07831c,39733,1734371789085 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-16T17:57:05,066 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39733 {}] ipc.CallRunner(138): callId: 14 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41652 deadline: 1734371885065, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7212a7dec92fa5781081695b56d809ad, server=3609ad07831c,39733,1734371789085 2024-12-16T17:57:05,087 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41817 is added to blk_1073741971_1147 (size=12001) 2024-12-16T17:57:05,088 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=17.89 KB at sequenceid=15 (bloomFilter=true), to=hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/7212a7dec92fa5781081695b56d809ad/.tmp/C/d931f50d815a4247a6a3494b887c3b1b 2024-12-16T17:57:05,096 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/7212a7dec92fa5781081695b56d809ad/.tmp/A/e7a2c69b873d41a8a8ecdd9801ba0d17 as hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/7212a7dec92fa5781081695b56d809ad/A/e7a2c69b873d41a8a8ecdd9801ba0d17 2024-12-16T17:57:05,106 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/7212a7dec92fa5781081695b56d809ad/A/e7a2c69b873d41a8a8ecdd9801ba0d17, entries=150, sequenceid=15, filesize=30.2 K 2024-12-16T17:57:05,108 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/7212a7dec92fa5781081695b56d809ad/.tmp/B/cde3158a9b194369a7a97a3584e33c0d as hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/7212a7dec92fa5781081695b56d809ad/B/cde3158a9b194369a7a97a3584e33c0d 2024-12-16T17:57:05,114 
INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/7212a7dec92fa5781081695b56d809ad/B/cde3158a9b194369a7a97a3584e33c0d, entries=150, sequenceid=15, filesize=11.7 K 2024-12-16T17:57:05,116 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/7212a7dec92fa5781081695b56d809ad/.tmp/C/d931f50d815a4247a6a3494b887c3b1b as hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/7212a7dec92fa5781081695b56d809ad/C/d931f50d815a4247a6a3494b887c3b1b 2024-12-16T17:57:05,126 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/7212a7dec92fa5781081695b56d809ad/C/d931f50d815a4247a6a3494b887c3b1b, entries=150, sequenceid=15, filesize=11.7 K 2024-12-16T17:57:05,127 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~53.67 KB/54960, heapSize ~141.33 KB/144720, currentSize=154.31 KB/158010 for 7212a7dec92fa5781081695b56d809ad in 712ms, sequenceid=15, compaction requested=false 2024-12-16T17:57:05,127 DEBUG [MemStoreFlusher.0 {}] regionserver.MetricsTableSourceImpl(133): Creating new MetricsTableSourceImpl for table 'TestAcidGuarantees' 2024-12-16T17:57:05,128 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 7212a7dec92fa5781081695b56d809ad: 2024-12-16T17:57:05,167 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 3609ad07831c,39733,1734371789085 2024-12-16T17:57:05,168 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=39733 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=42 2024-12-16T17:57:05,168 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-1 {event_type=RS_FLUSH_REGIONS, pid=42}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1734371821088.7212a7dec92fa5781081695b56d809ad. 
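The bursts of RegionTooBusyException ("Over memstore limit=512.0 K") in the preceding entries are the region blocking writers because the memstore has grown past its blocking threshold while the flush is still running. In HBase that threshold is the product of hbase.hregion.memstore.flush.size and hbase.hregion.memstore.block.multiplier, so a 512 KB limit is consistent with a deliberately small flush size for this test. The sketch below shows one way such a limit could be configured; the concrete values are illustrative assumptions and are not taken from the test setup.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;

public class MemstoreLimitSketch {
  public static Configuration smallMemstoreConf() {
    Configuration conf = HBaseConfiguration.create();
    // Blocking limit = flush size * block multiplier.
    // With 128 KB * 4 the region blocks writers at 512 KB, matching the
    // "Over memstore limit=512.0 K" warnings above (values illustrative).
    conf.setLong("hbase.hregion.memstore.flush.size", 128 * 1024L);
    conf.setInt("hbase.hregion.memstore.block.multiplier", 4);
    return conf;
  }
}
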
2024-12-16T17:57:05,168 INFO [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-1 {event_type=RS_FLUSH_REGIONS, pid=42}] regionserver.HRegion(2837): Flushing 7212a7dec92fa5781081695b56d809ad 3/3 column families, dataSize=154.31 KB heapSize=405.05 KB 2024-12-16T17:57:05,168 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-1 {event_type=RS_FLUSH_REGIONS, pid=42}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 7212a7dec92fa5781081695b56d809ad, store=A 2024-12-16T17:57:05,169 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-1 {event_type=RS_FLUSH_REGIONS, pid=42}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-16T17:57:05,169 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-1 {event_type=RS_FLUSH_REGIONS, pid=42}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 7212a7dec92fa5781081695b56d809ad, store=B 2024-12-16T17:57:05,169 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-1 {event_type=RS_FLUSH_REGIONS, pid=42}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-16T17:57:05,169 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-1 {event_type=RS_FLUSH_REGIONS, pid=42}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 7212a7dec92fa5781081695b56d809ad, store=C 2024-12-16T17:57:05,169 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-1 {event_type=RS_FLUSH_REGIONS, pid=42}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-16T17:57:05,207 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-1 {event_type=RS_FLUSH_REGIONS, pid=42}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241216aa6bd760860f47fa9ad2324543fe39e3_7212a7dec92fa5781081695b56d809ad is 50, key is test_row_0/A:col10/1734371824452/Put/seqid=0 2024-12-16T17:57:05,256 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41817 is added to blk_1073741972_1148 (size=12154) 2024-12-16T17:57:05,257 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-1 {event_type=RS_FLUSH_REGIONS, pid=42}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-16T17:57:05,263 INFO [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-1 {event_type=RS_FLUSH_REGIONS, pid=42}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241216aa6bd760860f47fa9ad2324543fe39e3_7212a7dec92fa5781081695b56d809ad to hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241216aa6bd760860f47fa9ad2324543fe39e3_7212a7dec92fa5781081695b56d809ad 2024-12-16T17:57:05,265 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-1 {event_type=RS_FLUSH_REGIONS, pid=42}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/7212a7dec92fa5781081695b56d809ad/.tmp/A/dcc91abd2eeb42f6a5223b33e78b2eac, store: [table=TestAcidGuarantees family=A region=7212a7dec92fa5781081695b56d809ad] 2024-12-16T17:57:05,266 DEBUG 
[RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-1 {event_type=RS_FLUSH_REGIONS, pid=42}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/7212a7dec92fa5781081695b56d809ad/.tmp/A/dcc91abd2eeb42f6a5223b33e78b2eac is 175, key is test_row_0/A:col10/1734371824452/Put/seqid=0 2024-12-16T17:57:05,278 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41817 is added to blk_1073741973_1149 (size=30955) 2024-12-16T17:57:05,502 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38367 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=41 2024-12-16T17:57:05,572 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39733 {}] regionserver.HRegion(8581): Flush requested on 7212a7dec92fa5781081695b56d809ad 2024-12-16T17:57:05,572 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1734371821088.7212a7dec92fa5781081695b56d809ad. as already flushing 2024-12-16T17:57:05,639 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39733 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7212a7dec92fa5781081695b56d809ad, server=3609ad07831c,39733,1734371789085 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-16T17:57:05,639 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39733 {}] ipc.CallRunner(138): callId: 16 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41642 deadline: 1734371885628, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7212a7dec92fa5781081695b56d809ad, server=3609ad07831c,39733,1734371789085 2024-12-16T17:57:05,640 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39733 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7212a7dec92fa5781081695b56d809ad, server=3609ad07831c,39733,1734371789085 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-16T17:57:05,640 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39733 {}] ipc.CallRunner(138): callId: 19 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41628 deadline: 1734371885633, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7212a7dec92fa5781081695b56d809ad, server=3609ad07831c,39733,1734371789085 2024-12-16T17:57:05,640 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39733 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7212a7dec92fa5781081695b56d809ad, server=3609ad07831c,39733,1734371789085 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-16T17:57:05,640 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39733 {}] ipc.CallRunner(138): callId: 17 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41586 deadline: 1734371885637, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7212a7dec92fa5781081695b56d809ad, server=3609ad07831c,39733,1734371789085 2024-12-16T17:57:05,642 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39733 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7212a7dec92fa5781081695b56d809ad, server=3609ad07831c,39733,1734371789085 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-16T17:57:05,643 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39733 {}] ipc.CallRunner(138): callId: 18 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41652 deadline: 1734371885638, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7212a7dec92fa5781081695b56d809ad, server=3609ad07831c,39733,1734371789085 2024-12-16T17:57:05,645 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39733 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7212a7dec92fa5781081695b56d809ad, server=3609ad07831c,39733,1734371789085 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-16T17:57:05,646 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39733 {}] ipc.CallRunner(138): callId: 19 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41612 deadline: 1734371885642, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7212a7dec92fa5781081695b56d809ad, server=3609ad07831c,39733,1734371789085 2024-12-16T17:57:05,680 INFO [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-1 {event_type=RS_FLUSH_REGIONS, pid=42}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=41, memsize=51.4 K, hasBloomFilter=true, into tmp file hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/7212a7dec92fa5781081695b56d809ad/.tmp/A/dcc91abd2eeb42f6a5223b33e78b2eac 2024-12-16T17:57:05,702 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-1 {event_type=RS_FLUSH_REGIONS, pid=42}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/7212a7dec92fa5781081695b56d809ad/.tmp/B/6a52563fbfd5403f9cf1ae3902893399 is 50, key is test_row_0/B:col10/1734371824452/Put/seqid=0 2024-12-16T17:57:05,740 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41817 is added to blk_1073741974_1150 (size=12001) 2024-12-16T17:57:05,744 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39733 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7212a7dec92fa5781081695b56d809ad, server=3609ad07831c,39733,1734371789085 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-16T17:57:05,745 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39733 {}] ipc.CallRunner(138): callId: 18 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41642 deadline: 1734371885741, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7212a7dec92fa5781081695b56d809ad, server=3609ad07831c,39733,1734371789085 2024-12-16T17:57:05,745 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39733 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7212a7dec92fa5781081695b56d809ad, server=3609ad07831c,39733,1734371789085 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-16T17:57:05,745 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39733 {}] ipc.CallRunner(138): callId: 21 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41628 deadline: 1734371885741, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7212a7dec92fa5781081695b56d809ad, server=3609ad07831c,39733,1734371789085 2024-12-16T17:57:05,748 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39733 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7212a7dec92fa5781081695b56d809ad, server=3609ad07831c,39733,1734371789085 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-16T17:57:05,748 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39733 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7212a7dec92fa5781081695b56d809ad, server=3609ad07831c,39733,1734371789085 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-16T17:57:05,748 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39733 {}] ipc.CallRunner(138): callId: 19 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41586 deadline: 1734371885742, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7212a7dec92fa5781081695b56d809ad, server=3609ad07831c,39733,1734371789085 2024-12-16T17:57:05,748 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39733 {}] ipc.CallRunner(138): callId: 20 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41652 deadline: 1734371885745, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7212a7dec92fa5781081695b56d809ad, server=3609ad07831c,39733,1734371789085 2024-12-16T17:57:05,749 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39733 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7212a7dec92fa5781081695b56d809ad, server=3609ad07831c,39733,1734371789085 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-16T17:57:05,749 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39733 {}] ipc.CallRunner(138): callId: 21 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41612 deadline: 1734371885747, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7212a7dec92fa5781081695b56d809ad, server=3609ad07831c,39733,1734371789085 2024-12-16T17:57:05,911 WARN [HBase-Metrics2-1 {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-hbase.properties,hadoop-metrics2.properties 2024-12-16T17:57:05,948 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39733 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7212a7dec92fa5781081695b56d809ad, server=3609ad07831c,39733,1734371789085 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-16T17:57:05,949 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39733 {}] ipc.CallRunner(138): callId: 20 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41642 deadline: 1734371885947, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7212a7dec92fa5781081695b56d809ad, server=3609ad07831c,39733,1734371789085 2024-12-16T17:57:05,952 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39733 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7212a7dec92fa5781081695b56d809ad, server=3609ad07831c,39733,1734371789085 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-16T17:57:05,953 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39733 {}] ipc.CallRunner(138): callId: 23 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41628 deadline: 1734371885947, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7212a7dec92fa5781081695b56d809ad, server=3609ad07831c,39733,1734371789085 2024-12-16T17:57:05,953 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39733 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7212a7dec92fa5781081695b56d809ad, server=3609ad07831c,39733,1734371789085 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-16T17:57:05,956 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39733 {}] ipc.CallRunner(138): callId: 21 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41586 deadline: 1734371885950, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7212a7dec92fa5781081695b56d809ad, server=3609ad07831c,39733,1734371789085 2024-12-16T17:57:05,958 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39733 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7212a7dec92fa5781081695b56d809ad, server=3609ad07831c,39733,1734371789085 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-16T17:57:05,959 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39733 {}] ipc.CallRunner(138): callId: 23 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41612 deadline: 1734371885951, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7212a7dec92fa5781081695b56d809ad, server=3609ad07831c,39733,1734371789085 2024-12-16T17:57:05,960 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39733 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7212a7dec92fa5781081695b56d809ad, server=3609ad07831c,39733,1734371789085 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-16T17:57:05,960 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39733 {}] ipc.CallRunner(138): callId: 22 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41652 deadline: 1734371885952, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7212a7dec92fa5781081695b56d809ad, server=3609ad07831c,39733,1734371789085 2024-12-16T17:57:06,142 INFO [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-1 {event_type=RS_FLUSH_REGIONS, pid=42}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=51.44 KB at sequenceid=41 (bloomFilter=true), to=hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/7212a7dec92fa5781081695b56d809ad/.tmp/B/6a52563fbfd5403f9cf1ae3902893399 2024-12-16T17:57:06,165 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-1 {event_type=RS_FLUSH_REGIONS, pid=42}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/7212a7dec92fa5781081695b56d809ad/.tmp/C/4b3db31384484ed8a1ad16415a6fb13c is 50, key is test_row_0/C:col10/1734371824452/Put/seqid=0 2024-12-16T17:57:06,214 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41817 is added to blk_1073741975_1151 (size=12001) 2024-12-16T17:57:06,254 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39733 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7212a7dec92fa5781081695b56d809ad, server=3609ad07831c,39733,1734371789085 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-16T17:57:06,254 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39733 {}] ipc.CallRunner(138): callId: 22 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41642 deadline: 1734371886253, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7212a7dec92fa5781081695b56d809ad, server=3609ad07831c,39733,1734371789085 2024-12-16T17:57:06,256 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39733 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7212a7dec92fa5781081695b56d809ad, server=3609ad07831c,39733,1734371789085 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-16T17:57:06,256 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39733 {}] ipc.CallRunner(138): callId: 25 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41628 deadline: 1734371886254, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7212a7dec92fa5781081695b56d809ad, server=3609ad07831c,39733,1734371789085 2024-12-16T17:57:06,257 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39733 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7212a7dec92fa5781081695b56d809ad, server=3609ad07831c,39733,1734371789085 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-16T17:57:06,258 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39733 {}] ipc.CallRunner(138): callId: 23 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41586 deadline: 1734371886257, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7212a7dec92fa5781081695b56d809ad, server=3609ad07831c,39733,1734371789085 2024-12-16T17:57:06,263 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39733 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7212a7dec92fa5781081695b56d809ad, server=3609ad07831c,39733,1734371789085 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-16T17:57:06,263 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39733 {}] ipc.CallRunner(138): callId: 25 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41612 deadline: 1734371886260, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7212a7dec92fa5781081695b56d809ad, server=3609ad07831c,39733,1734371789085 2024-12-16T17:57:06,265 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39733 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7212a7dec92fa5781081695b56d809ad, server=3609ad07831c,39733,1734371789085 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-16T17:57:06,265 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39733 {}] ipc.CallRunner(138): callId: 24 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41652 deadline: 1734371886263, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7212a7dec92fa5781081695b56d809ad, server=3609ad07831c,39733,1734371789085 2024-12-16T17:57:06,503 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38367 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=41 2024-12-16T17:57:06,614 INFO [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-1 {event_type=RS_FLUSH_REGIONS, pid=42}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=51.44 KB at sequenceid=41 (bloomFilter=true), to=hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/7212a7dec92fa5781081695b56d809ad/.tmp/C/4b3db31384484ed8a1ad16415a6fb13c 2024-12-16T17:57:06,622 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-1 {event_type=RS_FLUSH_REGIONS, pid=42}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/7212a7dec92fa5781081695b56d809ad/.tmp/A/dcc91abd2eeb42f6a5223b33e78b2eac as hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/7212a7dec92fa5781081695b56d809ad/A/dcc91abd2eeb42f6a5223b33e78b2eac 2024-12-16T17:57:06,636 INFO [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-1 {event_type=RS_FLUSH_REGIONS, pid=42}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/7212a7dec92fa5781081695b56d809ad/A/dcc91abd2eeb42f6a5223b33e78b2eac, entries=150, sequenceid=41, filesize=30.2 K 2024-12-16T17:57:06,639 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-1 {event_type=RS_FLUSH_REGIONS, pid=42}] regionserver.HRegionFileSystem(442): Committing 
hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/7212a7dec92fa5781081695b56d809ad/.tmp/B/6a52563fbfd5403f9cf1ae3902893399 as hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/7212a7dec92fa5781081695b56d809ad/B/6a52563fbfd5403f9cf1ae3902893399 2024-12-16T17:57:06,651 INFO [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-1 {event_type=RS_FLUSH_REGIONS, pid=42}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/7212a7dec92fa5781081695b56d809ad/B/6a52563fbfd5403f9cf1ae3902893399, entries=150, sequenceid=41, filesize=11.7 K 2024-12-16T17:57:06,655 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-1 {event_type=RS_FLUSH_REGIONS, pid=42}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/7212a7dec92fa5781081695b56d809ad/.tmp/C/4b3db31384484ed8a1ad16415a6fb13c as hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/7212a7dec92fa5781081695b56d809ad/C/4b3db31384484ed8a1ad16415a6fb13c 2024-12-16T17:57:06,665 DEBUG [BootstrapNodeManager {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=RegionServerStatusService, sasl=false 2024-12-16T17:57:06,667 INFO [RS-EventLoopGroup-1-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:59548, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins.hfs.0 (auth:SIMPLE), service=RegionServerStatusService 2024-12-16T17:57:06,667 INFO [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-1 {event_type=RS_FLUSH_REGIONS, pid=42}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/7212a7dec92fa5781081695b56d809ad/C/4b3db31384484ed8a1ad16415a6fb13c, entries=150, sequenceid=41, filesize=11.7 K 2024-12-16T17:57:06,676 INFO [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-1 {event_type=RS_FLUSH_REGIONS, pid=42}] regionserver.HRegion(3040): Finished flush of dataSize ~154.31 KB/158010, heapSize ~405 KB/414720, currentSize=53.67 KB/54960 for 7212a7dec92fa5781081695b56d809ad in 1508ms, sequenceid=41, compaction requested=false 2024-12-16T17:57:06,677 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-1 {event_type=RS_FLUSH_REGIONS, pid=42}] regionserver.HRegion(2538): Flush status journal for 7212a7dec92fa5781081695b56d809ad: 2024-12-16T17:57:06,677 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-1 {event_type=RS_FLUSH_REGIONS, pid=42}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1734371821088.7212a7dec92fa5781081695b56d809ad. 
2024-12-16T17:57:06,677 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-1 {event_type=RS_FLUSH_REGIONS, pid=42}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=42 2024-12-16T17:57:06,677 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38367 {}] master.HMaster(4106): Remote procedure done, pid=42 2024-12-16T17:57:06,681 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=42, resume processing ppid=41 2024-12-16T17:57:06,681 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=42, ppid=41, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 2.2800 sec 2024-12-16T17:57:06,684 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=41, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=41, table=TestAcidGuarantees in 2.2870 sec 2024-12-16T17:57:06,763 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 7212a7dec92fa5781081695b56d809ad 3/3 column families, dataSize=67.09 KB heapSize=176.53 KB 2024-12-16T17:57:06,764 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 7212a7dec92fa5781081695b56d809ad, store=A 2024-12-16T17:57:06,764 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-16T17:57:06,764 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 7212a7dec92fa5781081695b56d809ad, store=B 2024-12-16T17:57:06,764 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-16T17:57:06,764 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 7212a7dec92fa5781081695b56d809ad, store=C 2024-12-16T17:57:06,764 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-16T17:57:06,763 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39733 {}] regionserver.HRegion(8581): Flush requested on 7212a7dec92fa5781081695b56d809ad 2024-12-16T17:57:06,789 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241216f2c8a5725e9f4213b80c889c8cd28d9f_7212a7dec92fa5781081695b56d809ad is 50, key is test_row_0/A:col10/1734371825571/Put/seqid=0 2024-12-16T17:57:06,806 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39733 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7212a7dec92fa5781081695b56d809ad, server=3609ad07831c,39733,1734371789085 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-16T17:57:06,807 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39733 {}] ipc.CallRunner(138): callId: 29 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41586 deadline: 1734371886799, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7212a7dec92fa5781081695b56d809ad, server=3609ad07831c,39733,1734371789085 2024-12-16T17:57:06,807 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39733 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7212a7dec92fa5781081695b56d809ad, server=3609ad07831c,39733,1734371789085 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-16T17:57:06,807 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39733 {}] ipc.CallRunner(138): callId: 29 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41642 deadline: 1734371886800, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7212a7dec92fa5781081695b56d809ad, server=3609ad07831c,39733,1734371789085 2024-12-16T17:57:06,808 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39733 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7212a7dec92fa5781081695b56d809ad, server=3609ad07831c,39733,1734371789085 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-16T17:57:06,808 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39733 {}] ipc.CallRunner(138): callId: 32 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41628 deadline: 1734371886805, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7212a7dec92fa5781081695b56d809ad, server=3609ad07831c,39733,1734371789085 2024-12-16T17:57:06,809 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39733 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7212a7dec92fa5781081695b56d809ad, server=3609ad07831c,39733,1734371789085 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-16T17:57:06,809 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39733 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7212a7dec92fa5781081695b56d809ad, server=3609ad07831c,39733,1734371789085 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-16T17:57:06,809 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39733 {}] ipc.CallRunner(138): callId: 30 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41652 deadline: 1734371886807, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7212a7dec92fa5781081695b56d809ad, server=3609ad07831c,39733,1734371789085 2024-12-16T17:57:06,809 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39733 {}] ipc.CallRunner(138): callId: 31 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41612 deadline: 1734371886806, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7212a7dec92fa5781081695b56d809ad, server=3609ad07831c,39733,1734371789085 2024-12-16T17:57:06,832 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41817 is added to blk_1073741976_1152 (size=17034) 2024-12-16T17:57:06,910 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39733 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7212a7dec92fa5781081695b56d809ad, server=3609ad07831c,39733,1734371789085 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-16T17:57:06,910 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39733 {}] ipc.CallRunner(138): callId: 31 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41586 deadline: 1734371886908, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7212a7dec92fa5781081695b56d809ad, server=3609ad07831c,39733,1734371789085 2024-12-16T17:57:06,910 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39733 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7212a7dec92fa5781081695b56d809ad, server=3609ad07831c,39733,1734371789085 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-16T17:57:06,912 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39733 {}] ipc.CallRunner(138): callId: 31 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41642 deadline: 1734371886909, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7212a7dec92fa5781081695b56d809ad, server=3609ad07831c,39733,1734371789085 2024-12-16T17:57:06,912 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39733 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7212a7dec92fa5781081695b56d809ad, server=3609ad07831c,39733,1734371789085 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-16T17:57:06,912 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39733 {}] ipc.CallRunner(138): callId: 34 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41628 deadline: 1734371886909, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7212a7dec92fa5781081695b56d809ad, server=3609ad07831c,39733,1734371789085 2024-12-16T17:57:06,914 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39733 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7212a7dec92fa5781081695b56d809ad, server=3609ad07831c,39733,1734371789085 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-16T17:57:06,914 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39733 {}] ipc.CallRunner(138): callId: 32 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41652 deadline: 1734371886911, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7212a7dec92fa5781081695b56d809ad, server=3609ad07831c,39733,1734371789085 2024-12-16T17:57:06,914 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39733 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7212a7dec92fa5781081695b56d809ad, server=3609ad07831c,39733,1734371789085 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-16T17:57:06,915 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39733 {}] ipc.CallRunner(138): callId: 33 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41612 deadline: 1734371886911, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7212a7dec92fa5781081695b56d809ad, server=3609ad07831c,39733,1734371789085 2024-12-16T17:57:07,114 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39733 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7212a7dec92fa5781081695b56d809ad, server=3609ad07831c,39733,1734371789085 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-16T17:57:07,115 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39733 {}] ipc.CallRunner(138): callId: 33 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41586 deadline: 1734371887112, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7212a7dec92fa5781081695b56d809ad, server=3609ad07831c,39733,1734371789085 2024-12-16T17:57:07,117 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39733 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7212a7dec92fa5781081695b56d809ad, server=3609ad07831c,39733,1734371789085 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-16T17:57:07,117 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39733 {}] ipc.CallRunner(138): callId: 33 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41642 deadline: 1734371887113, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7212a7dec92fa5781081695b56d809ad, server=3609ad07831c,39733,1734371789085 2024-12-16T17:57:07,118 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39733 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7212a7dec92fa5781081695b56d809ad, server=3609ad07831c,39733,1734371789085 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-16T17:57:07,118 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39733 {}] ipc.CallRunner(138): callId: 36 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41628 deadline: 1734371887115, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7212a7dec92fa5781081695b56d809ad, server=3609ad07831c,39733,1734371789085 2024-12-16T17:57:07,120 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39733 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7212a7dec92fa5781081695b56d809ad, server=3609ad07831c,39733,1734371789085 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-16T17:57:07,121 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39733 {}] ipc.CallRunner(138): callId: 34 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41652 deadline: 1734371887116, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7212a7dec92fa5781081695b56d809ad, server=3609ad07831c,39733,1734371789085 2024-12-16T17:57:07,122 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39733 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7212a7dec92fa5781081695b56d809ad, server=3609ad07831c,39733,1734371789085 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-16T17:57:07,123 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39733 {}] ipc.CallRunner(138): callId: 35 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41612 deadline: 1734371887117, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7212a7dec92fa5781081695b56d809ad, server=3609ad07831c,39733,1734371789085 2024-12-16T17:57:07,233 DEBUG [MemStoreFlusher.0 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-16T17:57:07,240 INFO [MemStoreFlusher.0 {}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241216f2c8a5725e9f4213b80c889c8cd28d9f_7212a7dec92fa5781081695b56d809ad to hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241216f2c8a5725e9f4213b80c889c8cd28d9f_7212a7dec92fa5781081695b56d809ad 2024-12-16T17:57:07,243 DEBUG [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/7212a7dec92fa5781081695b56d809ad/.tmp/A/8382e2b8d4f64b768f197bcaf208a283, store: [table=TestAcidGuarantees family=A region=7212a7dec92fa5781081695b56d809ad] 2024-12-16T17:57:07,244 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/7212a7dec92fa5781081695b56d809ad/.tmp/A/8382e2b8d4f64b768f197bcaf208a283 is 175, key is test_row_0/A:col10/1734371825571/Put/seqid=0 2024-12-16T17:57:07,274 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41817 is added to blk_1073741977_1153 (size=48139) 2024-12-16T17:57:07,275 INFO [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=54, memsize=22.4 
K, hasBloomFilter=true, into tmp file hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/7212a7dec92fa5781081695b56d809ad/.tmp/A/8382e2b8d4f64b768f197bcaf208a283 2024-12-16T17:57:07,286 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/7212a7dec92fa5781081695b56d809ad/.tmp/B/ede95ed9a256403d95b8292ef99cd61a is 50, key is test_row_0/B:col10/1734371825571/Put/seqid=0 2024-12-16T17:57:07,291 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41817 is added to blk_1073741978_1154 (size=12001) 2024-12-16T17:57:07,292 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=22.36 KB at sequenceid=54 (bloomFilter=true), to=hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/7212a7dec92fa5781081695b56d809ad/.tmp/B/ede95ed9a256403d95b8292ef99cd61a 2024-12-16T17:57:07,314 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/7212a7dec92fa5781081695b56d809ad/.tmp/C/a5d4761eae92409585ad222fdf5cf957 is 50, key is test_row_0/C:col10/1734371825571/Put/seqid=0 2024-12-16T17:57:07,352 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41817 is added to blk_1073741979_1155 (size=12001) 2024-12-16T17:57:07,354 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=22.36 KB at sequenceid=54 (bloomFilter=true), to=hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/7212a7dec92fa5781081695b56d809ad/.tmp/C/a5d4761eae92409585ad222fdf5cf957 2024-12-16T17:57:07,361 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/7212a7dec92fa5781081695b56d809ad/.tmp/A/8382e2b8d4f64b768f197bcaf208a283 as hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/7212a7dec92fa5781081695b56d809ad/A/8382e2b8d4f64b768f197bcaf208a283 2024-12-16T17:57:07,371 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/7212a7dec92fa5781081695b56d809ad/A/8382e2b8d4f64b768f197bcaf208a283, entries=250, sequenceid=54, filesize=47.0 K 2024-12-16T17:57:07,373 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/7212a7dec92fa5781081695b56d809ad/.tmp/B/ede95ed9a256403d95b8292ef99cd61a as hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/7212a7dec92fa5781081695b56d809ad/B/ede95ed9a256403d95b8292ef99cd61a 2024-12-16T17:57:07,381 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added 
hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/7212a7dec92fa5781081695b56d809ad/B/ede95ed9a256403d95b8292ef99cd61a, entries=150, sequenceid=54, filesize=11.7 K 2024-12-16T17:57:07,386 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/7212a7dec92fa5781081695b56d809ad/.tmp/C/a5d4761eae92409585ad222fdf5cf957 as hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/7212a7dec92fa5781081695b56d809ad/C/a5d4761eae92409585ad222fdf5cf957 2024-12-16T17:57:07,393 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/7212a7dec92fa5781081695b56d809ad/C/a5d4761eae92409585ad222fdf5cf957, entries=150, sequenceid=54, filesize=11.7 K 2024-12-16T17:57:07,395 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~67.09 KB/68700, heapSize ~176.48 KB/180720, currentSize=134.18 KB/137400 for 7212a7dec92fa5781081695b56d809ad in 632ms, sequenceid=54, compaction requested=true 2024-12-16T17:57:07,395 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 7212a7dec92fa5781081695b56d809ad: 2024-12-16T17:57:07,395 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 7212a7dec92fa5781081695b56d809ad:A, priority=-2147483648, current under compaction store size is 1 2024-12-16T17:57:07,395 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-16T17:57:07,395 DEBUG [RS:0;3609ad07831c:39733-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-16T17:57:07,395 DEBUG [RS:0;3609ad07831c:39733-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-16T17:57:07,396 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 7212a7dec92fa5781081695b56d809ad:B, priority=-2147483648, current under compaction store size is 2 2024-12-16T17:57:07,396 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-16T17:57:07,397 DEBUG [RS:0;3609ad07831c:39733-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 110049 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-16T17:57:07,397 DEBUG [RS:0;3609ad07831c:39733-shortCompactions-0 {}] regionserver.HStore(1540): 7212a7dec92fa5781081695b56d809ad/A is initiating minor compaction (all files) 2024-12-16T17:57:07,397 INFO [RS:0;3609ad07831c:39733-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 7212a7dec92fa5781081695b56d809ad/A in TestAcidGuarantees,,1734371821088.7212a7dec92fa5781081695b56d809ad. 
2024-12-16T17:57:07,398 INFO [RS:0;3609ad07831c:39733-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/7212a7dec92fa5781081695b56d809ad/A/e7a2c69b873d41a8a8ecdd9801ba0d17, hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/7212a7dec92fa5781081695b56d809ad/A/dcc91abd2eeb42f6a5223b33e78b2eac, hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/7212a7dec92fa5781081695b56d809ad/A/8382e2b8d4f64b768f197bcaf208a283] into tmpdir=hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/7212a7dec92fa5781081695b56d809ad/.tmp, totalSize=107.5 K 2024-12-16T17:57:07,398 INFO [RS:0;3609ad07831c:39733-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(181): MOB compaction: major=false isAll=true priority=13 throughput controller=DefaultCompactionThroughputController [maxThroughput=50.00 MB/second, activeCompactions=0] table=TestAcidGuarantees cf=A region=TestAcidGuarantees,,1734371821088.7212a7dec92fa5781081695b56d809ad. 2024-12-16T17:57:07,398 DEBUG [RS:0;3609ad07831c:39733-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(191): MOB compaction table=TestAcidGuarantees cf=A region=TestAcidGuarantees,,1734371821088.7212a7dec92fa5781081695b56d809ad. files: [hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/7212a7dec92fa5781081695b56d809ad/A/e7a2c69b873d41a8a8ecdd9801ba0d17, hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/7212a7dec92fa5781081695b56d809ad/A/dcc91abd2eeb42f6a5223b33e78b2eac, hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/7212a7dec92fa5781081695b56d809ad/A/8382e2b8d4f64b768f197bcaf208a283] 2024-12-16T17:57:07,398 DEBUG [RS:0;3609ad07831c:39733-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 36003 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-16T17:57:07,398 DEBUG [RS:0;3609ad07831c:39733-longCompactions-0 {}] regionserver.HStore(1540): 7212a7dec92fa5781081695b56d809ad/B is initiating minor compaction (all files) 2024-12-16T17:57:07,398 INFO [RS:0;3609ad07831c:39733-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 7212a7dec92fa5781081695b56d809ad/B in TestAcidGuarantees,,1734371821088.7212a7dec92fa5781081695b56d809ad. 
2024-12-16T17:57:07,398 INFO [RS:0;3609ad07831c:39733-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/7212a7dec92fa5781081695b56d809ad/B/cde3158a9b194369a7a97a3584e33c0d, hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/7212a7dec92fa5781081695b56d809ad/B/6a52563fbfd5403f9cf1ae3902893399, hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/7212a7dec92fa5781081695b56d809ad/B/ede95ed9a256403d95b8292ef99cd61a] into tmpdir=hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/7212a7dec92fa5781081695b56d809ad/.tmp, totalSize=35.2 K 2024-12-16T17:57:07,399 DEBUG [RS:0;3609ad07831c:39733-longCompactions-0 {}] compactions.Compactor(224): Compacting cde3158a9b194369a7a97a3584e33c0d, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=15, earliestPutTs=1734371824411 2024-12-16T17:57:07,399 DEBUG [RS:0;3609ad07831c:39733-shortCompactions-0 {}] compactions.Compactor(224): Compacting e7a2c69b873d41a8a8ecdd9801ba0d17, keycount=150, bloomtype=ROW, size=30.2 K, encoding=NONE, compression=NONE, seqNum=15, earliestPutTs=1734371824411 2024-12-16T17:57:07,400 DEBUG [RS:0;3609ad07831c:39733-longCompactions-0 {}] compactions.Compactor(224): Compacting 6a52563fbfd5403f9cf1ae3902893399, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=41, earliestPutTs=1734371824447 2024-12-16T17:57:07,400 DEBUG [RS:0;3609ad07831c:39733-shortCompactions-0 {}] compactions.Compactor(224): Compacting dcc91abd2eeb42f6a5223b33e78b2eac, keycount=150, bloomtype=ROW, size=30.2 K, encoding=NONE, compression=NONE, seqNum=41, earliestPutTs=1734371824447 2024-12-16T17:57:07,401 DEBUG [RS:0;3609ad07831c:39733-longCompactions-0 {}] compactions.Compactor(224): Compacting ede95ed9a256403d95b8292ef99cd61a, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=54, earliestPutTs=1734371825571 2024-12-16T17:57:07,401 DEBUG [RS:0;3609ad07831c:39733-shortCompactions-0 {}] compactions.Compactor(224): Compacting 8382e2b8d4f64b768f197bcaf208a283, keycount=250, bloomtype=ROW, size=47.0 K, encoding=NONE, compression=NONE, seqNum=54, earliestPutTs=1734371825571 2024-12-16T17:57:07,415 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 7212a7dec92fa5781081695b56d809ad:C, priority=-2147483648, current under compaction store size is 3 2024-12-16T17:57:07,415 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-16T17:57:07,416 INFO [RS:0;3609ad07831c:39733-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(322): Compact MOB=false optimized configured=false optimized enabled=false maximum MOB file size=1073741824 major=true store=[table=TestAcidGuarantees family=A region=7212a7dec92fa5781081695b56d809ad] 2024-12-16T17:57:07,422 INFO [RS:0;3609ad07831c:39733-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 7212a7dec92fa5781081695b56d809ad#B#compaction#136 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 
1 active operations remaining, total limit is 50.00 MB/second 2024-12-16T17:57:07,423 DEBUG [RS:0;3609ad07831c:39733-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/7212a7dec92fa5781081695b56d809ad/.tmp/B/193c1325600f4b45a0ffca178f4ec64c is 50, key is test_row_0/B:col10/1734371825571/Put/seqid=0 2024-12-16T17:57:07,424 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39733 {}] regionserver.HRegion(8581): Flush requested on 7212a7dec92fa5781081695b56d809ad 2024-12-16T17:57:07,425 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 7212a7dec92fa5781081695b56d809ad 3/3 column families, dataSize=147.60 KB heapSize=387.47 KB 2024-12-16T17:57:07,425 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 7212a7dec92fa5781081695b56d809ad, store=A 2024-12-16T17:57:07,425 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-16T17:57:07,425 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 7212a7dec92fa5781081695b56d809ad, store=B 2024-12-16T17:57:07,425 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-16T17:57:07,425 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 7212a7dec92fa5781081695b56d809ad, store=C 2024-12-16T17:57:07,425 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-16T17:57:07,434 DEBUG [RS:0;3609ad07831c:39733-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(626): New MOB writer created=d41d8cd98f00b204e9800998ecf8427e20241216e7f92678ddef47f7b266b6c594e264a9_7212a7dec92fa5781081695b56d809ad store=[table=TestAcidGuarantees family=A region=7212a7dec92fa5781081695b56d809ad] 2024-12-16T17:57:07,441 DEBUG [RS:0;3609ad07831c:39733-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(647): Commit or abort size=0 mobCells=0 major=true file=d41d8cd98f00b204e9800998ecf8427e20241216e7f92678ddef47f7b266b6c594e264a9_7212a7dec92fa5781081695b56d809ad, store=[table=TestAcidGuarantees family=A region=7212a7dec92fa5781081695b56d809ad] 2024-12-16T17:57:07,441 DEBUG [RS:0;3609ad07831c:39733-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(658): Aborting writer for hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241216e7f92678ddef47f7b266b6c594e264a9_7212a7dec92fa5781081695b56d809ad because there are no MOB cells, store=[table=TestAcidGuarantees family=A region=7212a7dec92fa5781081695b56d809ad] 2024-12-16T17:57:07,448 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39733 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7212a7dec92fa5781081695b56d809ad, server=3609ad07831c,39733,1734371789085 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-16T17:57:07,448 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39733 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7212a7dec92fa5781081695b56d809ad, server=3609ad07831c,39733,1734371789085 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-16T17:57:07,448 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39733 {}] ipc.CallRunner(138): callId: 40 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41628 deadline: 1734371887435, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7212a7dec92fa5781081695b56d809ad, server=3609ad07831c,39733,1734371789085 2024-12-16T17:57:07,448 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39733 {}] ipc.CallRunner(138): callId: 37 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41586 deadline: 1734371887434, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7212a7dec92fa5781081695b56d809ad, server=3609ad07831c,39733,1734371789085 2024-12-16T17:57:07,452 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39733 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7212a7dec92fa5781081695b56d809ad, server=3609ad07831c,39733,1734371789085 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-16T17:57:07,452 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39733 {}] ipc.CallRunner(138): callId: 38 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41652 deadline: 1734371887446, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7212a7dec92fa5781081695b56d809ad, server=3609ad07831c,39733,1734371789085 2024-12-16T17:57:07,453 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39733 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7212a7dec92fa5781081695b56d809ad, server=3609ad07831c,39733,1734371789085 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-16T17:57:07,453 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39733 {}] ipc.CallRunner(138): callId: 39 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41612 deadline: 1734371887449, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7212a7dec92fa5781081695b56d809ad, server=3609ad07831c,39733,1734371789085 2024-12-16T17:57:07,453 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39733 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7212a7dec92fa5781081695b56d809ad, server=3609ad07831c,39733,1734371789085 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-16T17:57:07,453 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39733 {}] ipc.CallRunner(138): callId: 38 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41642 deadline: 1734371887449, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7212a7dec92fa5781081695b56d809ad, server=3609ad07831c,39733,1734371789085 2024-12-16T17:57:07,493 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41817 is added to blk_1073741980_1156 (size=12104) 2024-12-16T17:57:07,527 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e2024121680072e0a39584fcd9e5264f541d0ce40_7212a7dec92fa5781081695b56d809ad is 50, key is test_row_0/A:col10/1734371826803/Put/seqid=0 2024-12-16T17:57:07,546 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41817 is added to blk_1073741981_1157 (size=4469) 2024-12-16T17:57:07,549 INFO [RS:0;3609ad07831c:39733-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 7212a7dec92fa5781081695b56d809ad#A#compaction#135 average throughput is 0.18 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-12-16T17:57:07,551 DEBUG [RS:0;3609ad07831c:39733-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/7212a7dec92fa5781081695b56d809ad/.tmp/A/373c3802fb174c3f964f48d4def4af6f is 175, key is test_row_0/A:col10/1734371825571/Put/seqid=0 2024-12-16T17:57:07,557 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39733 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7212a7dec92fa5781081695b56d809ad, server=3609ad07831c,39733,1734371789085 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-16T17:57:07,557 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39733 {}] ipc.CallRunner(138): callId: 39 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41586 deadline: 1734371887550, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7212a7dec92fa5781081695b56d809ad, server=3609ad07831c,39733,1734371789085 2024-12-16T17:57:07,557 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39733 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7212a7dec92fa5781081695b56d809ad, server=3609ad07831c,39733,1734371789085 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-16T17:57:07,558 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39733 {}] ipc.CallRunner(138): callId: 42 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41628 deadline: 1734371887550, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7212a7dec92fa5781081695b56d809ad, server=3609ad07831c,39733,1734371789085 2024-12-16T17:57:07,558 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39733 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7212a7dec92fa5781081695b56d809ad, server=3609ad07831c,39733,1734371789085 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-16T17:57:07,558 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39733 {}] ipc.CallRunner(138): callId: 41 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41612 deadline: 1734371887554, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7212a7dec92fa5781081695b56d809ad, server=3609ad07831c,39733,1734371789085 2024-12-16T17:57:07,559 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39733 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7212a7dec92fa5781081695b56d809ad, server=3609ad07831c,39733,1734371789085 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-16T17:57:07,559 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39733 {}] ipc.CallRunner(138): callId: 40 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41642 deadline: 1734371887554, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7212a7dec92fa5781081695b56d809ad, server=3609ad07831c,39733,1734371789085 2024-12-16T17:57:07,559 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39733 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7212a7dec92fa5781081695b56d809ad, server=3609ad07831c,39733,1734371789085 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-16T17:57:07,559 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39733 {}] ipc.CallRunner(138): callId: 40 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41652 deadline: 1734371887554, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7212a7dec92fa5781081695b56d809ad, server=3609ad07831c,39733,1734371789085 2024-12-16T17:57:07,576 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41817 is added to blk_1073741982_1158 (size=12154) 2024-12-16T17:57:07,578 DEBUG [MemStoreFlusher.0 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-16T17:57:07,584 INFO [MemStoreFlusher.0 {}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e2024121680072e0a39584fcd9e5264f541d0ce40_7212a7dec92fa5781081695b56d809ad to hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e2024121680072e0a39584fcd9e5264f541d0ce40_7212a7dec92fa5781081695b56d809ad 2024-12-16T17:57:07,585 DEBUG [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/7212a7dec92fa5781081695b56d809ad/.tmp/A/f67b9ee5e63546dbba0bb290afc45d7c, store: [table=TestAcidGuarantees family=A region=7212a7dec92fa5781081695b56d809ad] 2024-12-16T17:57:07,586 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/7212a7dec92fa5781081695b56d809ad/.tmp/A/f67b9ee5e63546dbba0bb290afc45d7c is 175, key is test_row_0/A:col10/1734371826803/Put/seqid=0 2024-12-16T17:57:07,600 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41817 is added to blk_1073741983_1159 (size=31058) 2024-12-16T17:57:07,632 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41817 is added to blk_1073741984_1160 (size=30955) 2024-12-16T17:57:07,633 INFO [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=79, memsize=49.2 K, hasBloomFilter=true, into tmp file hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/7212a7dec92fa5781081695b56d809ad/.tmp/A/f67b9ee5e63546dbba0bb290afc45d7c 2024-12-16T17:57:07,662 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in 
hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/7212a7dec92fa5781081695b56d809ad/.tmp/B/7cedf7ac9f484cf6aa4aed7bd21913b5 is 50, key is test_row_0/B:col10/1734371826803/Put/seqid=0 2024-12-16T17:57:07,694 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41817 is added to blk_1073741985_1161 (size=12001) 2024-12-16T17:57:07,699 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=49.20 KB at sequenceid=79 (bloomFilter=true), to=hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/7212a7dec92fa5781081695b56d809ad/.tmp/B/7cedf7ac9f484cf6aa4aed7bd21913b5 2024-12-16T17:57:07,720 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/7212a7dec92fa5781081695b56d809ad/.tmp/C/16259b4a9fa74971bd76489851d9c7f2 is 50, key is test_row_0/C:col10/1734371826803/Put/seqid=0 2024-12-16T17:57:07,735 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41817 is added to blk_1073741986_1162 (size=12001) 2024-12-16T17:57:07,735 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=49.20 KB at sequenceid=79 (bloomFilter=true), to=hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/7212a7dec92fa5781081695b56d809ad/.tmp/C/16259b4a9fa74971bd76489851d9c7f2 2024-12-16T17:57:07,740 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/7212a7dec92fa5781081695b56d809ad/.tmp/A/f67b9ee5e63546dbba0bb290afc45d7c as hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/7212a7dec92fa5781081695b56d809ad/A/f67b9ee5e63546dbba0bb290afc45d7c 2024-12-16T17:57:07,745 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/7212a7dec92fa5781081695b56d809ad/A/f67b9ee5e63546dbba0bb290afc45d7c, entries=150, sequenceid=79, filesize=30.2 K 2024-12-16T17:57:07,746 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/7212a7dec92fa5781081695b56d809ad/.tmp/B/7cedf7ac9f484cf6aa4aed7bd21913b5 as hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/7212a7dec92fa5781081695b56d809ad/B/7cedf7ac9f484cf6aa4aed7bd21913b5 2024-12-16T17:57:07,754 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/7212a7dec92fa5781081695b56d809ad/B/7cedf7ac9f484cf6aa4aed7bd21913b5, entries=150, sequenceid=79, filesize=11.7 K 2024-12-16T17:57:07,756 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing 
hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/7212a7dec92fa5781081695b56d809ad/.tmp/C/16259b4a9fa74971bd76489851d9c7f2 as hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/7212a7dec92fa5781081695b56d809ad/C/16259b4a9fa74971bd76489851d9c7f2 2024-12-16T17:57:07,760 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39733 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7212a7dec92fa5781081695b56d809ad, server=3609ad07831c,39733,1734371789085 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-16T17:57:07,760 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39733 {}] ipc.CallRunner(138): callId: 41 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41586 deadline: 1734371887759, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7212a7dec92fa5781081695b56d809ad, server=3609ad07831c,39733,1734371789085 2024-12-16T17:57:07,761 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39733 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7212a7dec92fa5781081695b56d809ad, server=3609ad07831c,39733,1734371789085 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-16T17:57:07,761 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39733 {}] ipc.CallRunner(138): callId: 43 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41612 deadline: 1734371887759, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7212a7dec92fa5781081695b56d809ad, server=3609ad07831c,39733,1734371789085 2024-12-16T17:57:07,763 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39733 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7212a7dec92fa5781081695b56d809ad, server=3609ad07831c,39733,1734371789085 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-16T17:57:07,764 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39733 {}] ipc.CallRunner(138): callId: 42 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41642 deadline: 1734371887761, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7212a7dec92fa5781081695b56d809ad, server=3609ad07831c,39733,1734371789085 2024-12-16T17:57:07,764 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39733 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7212a7dec92fa5781081695b56d809ad, server=3609ad07831c,39733,1734371789085 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-16T17:57:07,764 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39733 {}] ipc.CallRunner(138): callId: 44 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41628 deadline: 1734371887761, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7212a7dec92fa5781081695b56d809ad, server=3609ad07831c,39733,1734371789085 2024-12-16T17:57:07,764 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39733 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7212a7dec92fa5781081695b56d809ad, server=3609ad07831c,39733,1734371789085 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-16T17:57:07,765 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39733 {}] ipc.CallRunner(138): callId: 42 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41652 deadline: 1734371887761, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7212a7dec92fa5781081695b56d809ad, server=3609ad07831c,39733,1734371789085 2024-12-16T17:57:07,765 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/7212a7dec92fa5781081695b56d809ad/C/16259b4a9fa74971bd76489851d9c7f2, entries=150, sequenceid=79, filesize=11.7 K 2024-12-16T17:57:07,766 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~147.60 KB/151140, heapSize ~387.42 KB/396720, currentSize=60.38 KB/61830 for 7212a7dec92fa5781081695b56d809ad in 342ms, sequenceid=79, compaction requested=true 2024-12-16T17:57:07,766 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 7212a7dec92fa5781081695b56d809ad: 2024-12-16T17:57:07,766 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 7212a7dec92fa5781081695b56d809ad:A, priority=-2147483648, current under compaction store size is 3 2024-12-16T17:57:07,766 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=2), splitQueue=0 2024-12-16T17:57:07,766 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 7212a7dec92fa5781081695b56d809ad:B, priority=-2147483648, current under compaction store size is 3 2024-12-16T17:57:07,766 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=3), splitQueue=0 2024-12-16T17:57:07,766 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 7212a7dec92fa5781081695b56d809ad:C, priority=-2147483648, current under compaction store size is 3 2024-12-16T17:57:07,766 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=4), splitQueue=0 2024-12-16T17:57:07,900 DEBUG [RS:0;3609ad07831c:39733-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/7212a7dec92fa5781081695b56d809ad/.tmp/B/193c1325600f4b45a0ffca178f4ec64c as hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/7212a7dec92fa5781081695b56d809ad/B/193c1325600f4b45a0ffca178f4ec64c 2024-12-16T17:57:07,909 INFO 
[RS:0;3609ad07831c:39733-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 7212a7dec92fa5781081695b56d809ad/B of 7212a7dec92fa5781081695b56d809ad into 193c1325600f4b45a0ffca178f4ec64c(size=11.8 K), total size for store is 23.5 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-12-16T17:57:07,909 DEBUG [RS:0;3609ad07831c:39733-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 7212a7dec92fa5781081695b56d809ad: 2024-12-16T17:57:07,909 INFO [RS:0;3609ad07831c:39733-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1734371821088.7212a7dec92fa5781081695b56d809ad., storeName=7212a7dec92fa5781081695b56d809ad/B, priority=13, startTime=1734371827395; duration=0sec 2024-12-16T17:57:07,910 DEBUG [RS:0;3609ad07831c:39733-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=4), splitQueue=0 2024-12-16T17:57:07,910 DEBUG [RS:0;3609ad07831c:39733-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 7212a7dec92fa5781081695b56d809ad:B 2024-12-16T17:57:07,910 DEBUG [RS:0;3609ad07831c:39733-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 4 store files, 3 compacting, 1 eligible, 16 blocking 2024-12-16T17:57:07,911 DEBUG [RS:0;3609ad07831c:39733-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 0 files of size 0 starting at candidate #-1 after considering 0 permutations with 0 in ratio 2024-12-16T17:57:07,911 DEBUG [RS:0;3609ad07831c:39733-longCompactions-0 {}] compactions.SortedCompactionPolicy(232): Not compacting files because we only have 0 files ready for compaction. Need 3 to initiate. 2024-12-16T17:57:07,911 DEBUG [RS:0;3609ad07831c:39733-longCompactions-0 {}] regionserver.CompactSplit(450): Not compacting TestAcidGuarantees,,1734371821088.7212a7dec92fa5781081695b56d809ad. because compaction request was cancelled 2024-12-16T17:57:07,911 DEBUG [RS:0;3609ad07831c:39733-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 7212a7dec92fa5781081695b56d809ad:A 2024-12-16T17:57:07,911 DEBUG [RS:0;3609ad07831c:39733-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 4 store files, 0 compacting, 4 eligible, 16 blocking 2024-12-16T17:57:07,913 DEBUG [RS:0;3609ad07831c:39733-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 4 files of size 48004 starting at candidate #0 after considering 3 permutations with 3 in ratio 2024-12-16T17:57:07,913 DEBUG [RS:0;3609ad07831c:39733-longCompactions-0 {}] regionserver.HStore(1540): 7212a7dec92fa5781081695b56d809ad/C is initiating minor compaction (all files) 2024-12-16T17:57:07,913 INFO [RS:0;3609ad07831c:39733-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 7212a7dec92fa5781081695b56d809ad/C in TestAcidGuarantees,,1734371821088.7212a7dec92fa5781081695b56d809ad. 
2024-12-16T17:57:07,913 INFO [RS:0;3609ad07831c:39733-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/7212a7dec92fa5781081695b56d809ad/C/d931f50d815a4247a6a3494b887c3b1b, hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/7212a7dec92fa5781081695b56d809ad/C/4b3db31384484ed8a1ad16415a6fb13c, hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/7212a7dec92fa5781081695b56d809ad/C/a5d4761eae92409585ad222fdf5cf957, hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/7212a7dec92fa5781081695b56d809ad/C/16259b4a9fa74971bd76489851d9c7f2] into tmpdir=hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/7212a7dec92fa5781081695b56d809ad/.tmp, totalSize=46.9 K 2024-12-16T17:57:07,914 DEBUG [RS:0;3609ad07831c:39733-longCompactions-0 {}] compactions.Compactor(224): Compacting d931f50d815a4247a6a3494b887c3b1b, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=15, earliestPutTs=1734371824411 2024-12-16T17:57:07,914 DEBUG [RS:0;3609ad07831c:39733-longCompactions-0 {}] compactions.Compactor(224): Compacting 4b3db31384484ed8a1ad16415a6fb13c, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=41, earliestPutTs=1734371824447 2024-12-16T17:57:07,915 DEBUG [RS:0;3609ad07831c:39733-longCompactions-0 {}] compactions.Compactor(224): Compacting a5d4761eae92409585ad222fdf5cf957, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=54, earliestPutTs=1734371825571 2024-12-16T17:57:07,915 DEBUG [RS:0;3609ad07831c:39733-longCompactions-0 {}] compactions.Compactor(224): Compacting 16259b4a9fa74971bd76489851d9c7f2, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=79, earliestPutTs=1734371826800 2024-12-16T17:57:07,931 INFO [RS:0;3609ad07831c:39733-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 7212a7dec92fa5781081695b56d809ad#C#compaction#140 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-12-16T17:57:07,931 DEBUG [RS:0;3609ad07831c:39733-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/7212a7dec92fa5781081695b56d809ad/.tmp/C/7f51f3735b1b43e88139f4816ffcd031 is 50, key is test_row_0/C:col10/1734371826803/Put/seqid=0 2024-12-16T17:57:07,964 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41817 is added to blk_1073741987_1163 (size=12139) 2024-12-16T17:57:08,013 DEBUG [RS:0;3609ad07831c:39733-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/7212a7dec92fa5781081695b56d809ad/.tmp/A/373c3802fb174c3f964f48d4def4af6f as hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/7212a7dec92fa5781081695b56d809ad/A/373c3802fb174c3f964f48d4def4af6f 2024-12-16T17:57:08,020 INFO [RS:0;3609ad07831c:39733-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 7212a7dec92fa5781081695b56d809ad/A of 7212a7dec92fa5781081695b56d809ad into 373c3802fb174c3f964f48d4def4af6f(size=30.3 K), total size for store is 60.6 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-12-16T17:57:08,020 DEBUG [RS:0;3609ad07831c:39733-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 7212a7dec92fa5781081695b56d809ad: 2024-12-16T17:57:08,020 INFO [RS:0;3609ad07831c:39733-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1734371821088.7212a7dec92fa5781081695b56d809ad., storeName=7212a7dec92fa5781081695b56d809ad/A, priority=13, startTime=1734371827395; duration=0sec 2024-12-16T17:57:08,020 DEBUG [RS:0;3609ad07831c:39733-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=2), splitQueue=0 2024-12-16T17:57:08,020 DEBUG [RS:0;3609ad07831c:39733-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 7212a7dec92fa5781081695b56d809ad:A 2024-12-16T17:57:08,020 DEBUG [RS:0;3609ad07831c:39733-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 7212a7dec92fa5781081695b56d809ad:B 2024-12-16T17:57:08,021 DEBUG [RS:0;3609ad07831c:39733-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 4 store files, 4 compacting, 0 eligible, 16 blocking 2024-12-16T17:57:08,021 DEBUG [RS:0;3609ad07831c:39733-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 0 files of size 0 starting at candidate #-1 after considering 0 permutations with 0 in ratio 2024-12-16T17:57:08,021 DEBUG [RS:0;3609ad07831c:39733-shortCompactions-0 {}] compactions.SortedCompactionPolicy(232): Not compacting files because we only have 0 files ready for compaction. Need 3 to initiate. 2024-12-16T17:57:08,021 DEBUG [RS:0;3609ad07831c:39733-shortCompactions-0 {}] regionserver.CompactSplit(450): Not compacting TestAcidGuarantees,,1734371821088.7212a7dec92fa5781081695b56d809ad. 
because compaction request was cancelled 2024-12-16T17:57:08,021 DEBUG [RS:0;3609ad07831c:39733-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 7212a7dec92fa5781081695b56d809ad:C 2024-12-16T17:57:08,021 DEBUG [RS:0;3609ad07831c:39733-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 2 store files, 0 compacting, 2 eligible, 16 blocking 2024-12-16T17:57:08,024 DEBUG [RS:0;3609ad07831c:39733-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 0 files of size 0 starting at candidate #-1 after considering 0 permutations with 0 in ratio 2024-12-16T17:57:08,024 DEBUG [RS:0;3609ad07831c:39733-shortCompactions-0 {}] compactions.SortedCompactionPolicy(232): Not compacting files because we only have 0 files ready for compaction. Need 3 to initiate. 2024-12-16T17:57:08,024 DEBUG [RS:0;3609ad07831c:39733-shortCompactions-0 {}] regionserver.CompactSplit(450): Not compacting TestAcidGuarantees,,1734371821088.7212a7dec92fa5781081695b56d809ad. because compaction request was cancelled 2024-12-16T17:57:08,024 DEBUG [RS:0;3609ad07831c:39733-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 7212a7dec92fa5781081695b56d809ad:B 2024-12-16T17:57:08,065 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 7212a7dec92fa5781081695b56d809ad 3/3 column families, dataSize=67.09 KB heapSize=176.53 KB 2024-12-16T17:57:08,065 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 7212a7dec92fa5781081695b56d809ad, store=A 2024-12-16T17:57:08,065 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-16T17:57:08,065 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 7212a7dec92fa5781081695b56d809ad, store=B 2024-12-16T17:57:08,065 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-16T17:57:08,065 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 7212a7dec92fa5781081695b56d809ad, store=C 2024-12-16T17:57:08,066 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-16T17:57:08,067 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39733 {}] regionserver.HRegion(8581): Flush requested on 7212a7dec92fa5781081695b56d809ad 2024-12-16T17:57:08,083 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241216955de616b26e4e3bb643e7f6cb6f243c_7212a7dec92fa5781081695b56d809ad is 50, key is test_row_0/A:col10/1734371828062/Put/seqid=0 2024-12-16T17:57:08,113 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41817 is added to blk_1073741988_1164 (size=12154) 2024-12-16T17:57:08,114 DEBUG [MemStoreFlusher.0 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-16T17:57:08,120 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39733 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7212a7dec92fa5781081695b56d809ad, server=3609ad07831c,39733,1734371789085 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-16T17:57:08,120 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39733 {}] ipc.CallRunner(138): callId: 47 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41642 deadline: 1734371888095, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7212a7dec92fa5781081695b56d809ad, server=3609ad07831c,39733,1734371789085 2024-12-16T17:57:08,124 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39733 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7212a7dec92fa5781081695b56d809ad, server=3609ad07831c,39733,1734371789085 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-16T17:57:08,124 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39733 {}] ipc.CallRunner(138): callId: 48 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41652 deadline: 1734371888105, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7212a7dec92fa5781081695b56d809ad, server=3609ad07831c,39733,1734371789085 2024-12-16T17:57:08,124 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39733 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7212a7dec92fa5781081695b56d809ad, server=3609ad07831c,39733,1734371789085 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-16T17:57:08,125 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39733 {}] ipc.CallRunner(138): callId: 48 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41586 deadline: 1734371888122, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7212a7dec92fa5781081695b56d809ad, server=3609ad07831c,39733,1734371789085 2024-12-16T17:57:08,129 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39733 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7212a7dec92fa5781081695b56d809ad, server=3609ad07831c,39733,1734371789085 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-16T17:57:08,129 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39733 {}] ipc.CallRunner(138): callId: 51 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41628 deadline: 1734371888128, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7212a7dec92fa5781081695b56d809ad, server=3609ad07831c,39733,1734371789085 2024-12-16T17:57:08,130 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39733 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7212a7dec92fa5781081695b56d809ad, server=3609ad07831c,39733,1734371789085 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-16T17:57:08,130 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39733 {}] ipc.CallRunner(138): callId: 50 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41612 deadline: 1734371888130, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7212a7dec92fa5781081695b56d809ad, server=3609ad07831c,39733,1734371789085 2024-12-16T17:57:08,132 INFO [MemStoreFlusher.0 {}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241216955de616b26e4e3bb643e7f6cb6f243c_7212a7dec92fa5781081695b56d809ad to hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241216955de616b26e4e3bb643e7f6cb6f243c_7212a7dec92fa5781081695b56d809ad 2024-12-16T17:57:08,133 DEBUG [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/7212a7dec92fa5781081695b56d809ad/.tmp/A/6e4bd1593f7e4a5aa3d044ed97abfa43, store: [table=TestAcidGuarantees family=A region=7212a7dec92fa5781081695b56d809ad] 2024-12-16T17:57:08,134 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/7212a7dec92fa5781081695b56d809ad/.tmp/A/6e4bd1593f7e4a5aa3d044ed97abfa43 is 175, key is test_row_0/A:col10/1734371828062/Put/seqid=0 2024-12-16T17:57:08,170 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41817 is added to blk_1073741989_1165 (size=30955) 2024-12-16T17:57:08,227 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39733 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7212a7dec92fa5781081695b56d809ad, server=3609ad07831c,39733,1734371789085 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-16T17:57:08,227 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39733 {}] ipc.CallRunner(138): callId: 49 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41642 deadline: 1734371888223, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7212a7dec92fa5781081695b56d809ad, server=3609ad07831c,39733,1734371789085 2024-12-16T17:57:08,231 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39733 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7212a7dec92fa5781081695b56d809ad, server=3609ad07831c,39733,1734371789085 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-16T17:57:08,231 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39733 {}] ipc.CallRunner(138): callId: 50 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41652 deadline: 1734371888230, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7212a7dec92fa5781081695b56d809ad, server=3609ad07831c,39733,1734371789085 2024-12-16T17:57:08,231 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39733 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7212a7dec92fa5781081695b56d809ad, server=3609ad07831c,39733,1734371789085 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-16T17:57:08,232 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39733 {}] ipc.CallRunner(138): callId: 50 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41586 deadline: 1734371888230, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7212a7dec92fa5781081695b56d809ad, server=3609ad07831c,39733,1734371789085 2024-12-16T17:57:08,232 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39733 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7212a7dec92fa5781081695b56d809ad, server=3609ad07831c,39733,1734371789085 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-16T17:57:08,232 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39733 {}] ipc.CallRunner(138): callId: 53 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41628 deadline: 1734371888230, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7212a7dec92fa5781081695b56d809ad, server=3609ad07831c,39733,1734371789085 2024-12-16T17:57:08,232 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39733 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7212a7dec92fa5781081695b56d809ad, server=3609ad07831c,39733,1734371789085 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-16T17:57:08,233 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39733 {}] ipc.CallRunner(138): callId: 52 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41612 deadline: 1734371888231, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7212a7dec92fa5781081695b56d809ad, server=3609ad07831c,39733,1734371789085 2024-12-16T17:57:08,372 DEBUG [RS:0;3609ad07831c:39733-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/7212a7dec92fa5781081695b56d809ad/.tmp/C/7f51f3735b1b43e88139f4816ffcd031 as hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/7212a7dec92fa5781081695b56d809ad/C/7f51f3735b1b43e88139f4816ffcd031 2024-12-16T17:57:08,379 INFO [RS:0;3609ad07831c:39733-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 4 (all) file(s) in 7212a7dec92fa5781081695b56d809ad/C of 7212a7dec92fa5781081695b56d809ad into 7f51f3735b1b43e88139f4816ffcd031(size=11.9 K), total size for store is 11.9 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-12-16T17:57:08,379 DEBUG [RS:0;3609ad07831c:39733-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 7212a7dec92fa5781081695b56d809ad: 2024-12-16T17:57:08,379 INFO [RS:0;3609ad07831c:39733-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1734371821088.7212a7dec92fa5781081695b56d809ad., storeName=7212a7dec92fa5781081695b56d809ad/C, priority=12, startTime=1734371827766; duration=0sec 2024-12-16T17:57:08,380 DEBUG [RS:0;3609ad07831c:39733-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-16T17:57:08,380 DEBUG [RS:0;3609ad07831c:39733-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 7212a7dec92fa5781081695b56d809ad:C 2024-12-16T17:57:08,430 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39733 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7212a7dec92fa5781081695b56d809ad, server=3609ad07831c,39733,1734371789085 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-16T17:57:08,431 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39733 {}] ipc.CallRunner(138): callId: 51 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41642 deadline: 1734371888429, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7212a7dec92fa5781081695b56d809ad, server=3609ad07831c,39733,1734371789085 2024-12-16T17:57:08,432 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39733 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7212a7dec92fa5781081695b56d809ad, server=3609ad07831c,39733,1734371789085 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-16T17:57:08,433 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39733 {}] ipc.CallRunner(138): callId: 52 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41652 deadline: 1734371888432, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7212a7dec92fa5781081695b56d809ad, server=3609ad07831c,39733,1734371789085 2024-12-16T17:57:08,434 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39733 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7212a7dec92fa5781081695b56d809ad, server=3609ad07831c,39733,1734371789085 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-16T17:57:08,435 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39733 {}] ipc.CallRunner(138): callId: 52 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41586 deadline: 1734371888434, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7212a7dec92fa5781081695b56d809ad, server=3609ad07831c,39733,1734371789085 2024-12-16T17:57:08,435 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39733 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7212a7dec92fa5781081695b56d809ad, server=3609ad07831c,39733,1734371789085 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-16T17:57:08,435 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39733 {}] ipc.CallRunner(138): callId: 55 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41628 deadline: 1734371888434, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7212a7dec92fa5781081695b56d809ad, server=3609ad07831c,39733,1734371789085 2024-12-16T17:57:08,437 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39733 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7212a7dec92fa5781081695b56d809ad, server=3609ad07831c,39733,1734371789085 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-16T17:57:08,437 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39733 {}] ipc.CallRunner(138): callId: 54 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41612 deadline: 1734371888435, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7212a7dec92fa5781081695b56d809ad, server=3609ad07831c,39733,1734371789085 2024-12-16T17:57:08,504 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38367 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=41 2024-12-16T17:57:08,505 INFO [Thread-705 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 41 completed 2024-12-16T17:57:08,506 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38367 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-12-16T17:57:08,507 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38367 {}] procedure2.ProcedureExecutor(1098): Stored pid=43, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=43, table=TestAcidGuarantees 2024-12-16T17:57:08,508 INFO [PEWorker-1 {}] procedure.FlushTableProcedure(91): pid=43, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=43, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-12-16T17:57:08,508 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38367 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=43 2024-12-16T17:57:08,508 INFO [PEWorker-1 {}] procedure.FlushTableProcedure(91): pid=43, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=43, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-12-16T17:57:08,509 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=44, ppid=43, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-12-16T17:57:08,571 INFO [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=94, memsize=22.4 K, hasBloomFilter=true, into tmp file hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/7212a7dec92fa5781081695b56d809ad/.tmp/A/6e4bd1593f7e4a5aa3d044ed97abfa43 2024-12-16T17:57:08,581 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/7212a7dec92fa5781081695b56d809ad/.tmp/B/dad3023d67d24c24b0fafbe3f107dcb8 is 50, key is test_row_0/B:col10/1734371828062/Put/seqid=0 2024-12-16T17:57:08,588 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41817 is added to blk_1073741990_1166 (size=12001) 
2024-12-16T17:57:08,588 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=22.36 KB at sequenceid=94 (bloomFilter=true), to=hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/7212a7dec92fa5781081695b56d809ad/.tmp/B/dad3023d67d24c24b0fafbe3f107dcb8 2024-12-16T17:57:08,609 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38367 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=43 2024-12-16T17:57:08,613 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/7212a7dec92fa5781081695b56d809ad/.tmp/C/0066b68d19ba49c2ab5b2ac5b7e384a7 is 50, key is test_row_0/C:col10/1734371828062/Put/seqid=0 2024-12-16T17:57:08,619 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41817 is added to blk_1073741991_1167 (size=12001) 2024-12-16T17:57:08,620 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=22.36 KB at sequenceid=94 (bloomFilter=true), to=hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/7212a7dec92fa5781081695b56d809ad/.tmp/C/0066b68d19ba49c2ab5b2ac5b7e384a7 2024-12-16T17:57:08,632 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/7212a7dec92fa5781081695b56d809ad/.tmp/A/6e4bd1593f7e4a5aa3d044ed97abfa43 as hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/7212a7dec92fa5781081695b56d809ad/A/6e4bd1593f7e4a5aa3d044ed97abfa43 2024-12-16T17:57:08,648 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/7212a7dec92fa5781081695b56d809ad/A/6e4bd1593f7e4a5aa3d044ed97abfa43, entries=150, sequenceid=94, filesize=30.2 K 2024-12-16T17:57:08,651 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/7212a7dec92fa5781081695b56d809ad/.tmp/B/dad3023d67d24c24b0fafbe3f107dcb8 as hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/7212a7dec92fa5781081695b56d809ad/B/dad3023d67d24c24b0fafbe3f107dcb8 2024-12-16T17:57:08,663 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 3609ad07831c,39733,1734371789085 2024-12-16T17:57:08,664 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=39733 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=44 2024-12-16T17:57:08,664 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-2 {event_type=RS_FLUSH_REGIONS, pid=44}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1734371821088.7212a7dec92fa5781081695b56d809ad. 2024-12-16T17:57:08,664 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-2 {event_type=RS_FLUSH_REGIONS, pid=44}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1734371821088.7212a7dec92fa5781081695b56d809ad. 
as already flushing 2024-12-16T17:57:08,664 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-2 {event_type=RS_FLUSH_REGIONS, pid=44}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1734371821088.7212a7dec92fa5781081695b56d809ad. 2024-12-16T17:57:08,664 ERROR [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-2 {event_type=RS_FLUSH_REGIONS, pid=44}] handler.RSProcedureHandler(58): pid=44 java.io.IOException: Unable to complete flush {ENCODED => 7212a7dec92fa5781081695b56d809ad, NAME => 'TestAcidGuarantees,,1734371821088.7212a7dec92fa5781081695b56d809ad.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-16T17:57:08,665 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-2 {event_type=RS_FLUSH_REGIONS, pid=44}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=44 java.io.IOException: Unable to complete flush {ENCODED => 7212a7dec92fa5781081695b56d809ad, NAME => 'TestAcidGuarantees,,1734371821088.7212a7dec92fa5781081695b56d809ad.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-16T17:57:08,665 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38367 {}] master.HMaster(4114): Remote procedure failed, pid=44 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 7212a7dec92fa5781081695b56d809ad, NAME => 'TestAcidGuarantees,,1734371821088.7212a7dec92fa5781081695b56d809ad.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] 
at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 7212a7dec92fa5781081695b56d809ad, NAME => 'TestAcidGuarantees,,1734371821088.7212a7dec92fa5781081695b56d809ad.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-16T17:57:08,672 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/7212a7dec92fa5781081695b56d809ad/B/dad3023d67d24c24b0fafbe3f107dcb8, entries=150, sequenceid=94, filesize=11.7 K 2024-12-16T17:57:08,676 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/7212a7dec92fa5781081695b56d809ad/.tmp/C/0066b68d19ba49c2ab5b2ac5b7e384a7 as hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/7212a7dec92fa5781081695b56d809ad/C/0066b68d19ba49c2ab5b2ac5b7e384a7 2024-12-16T17:57:08,688 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/7212a7dec92fa5781081695b56d809ad/C/0066b68d19ba49c2ab5b2ac5b7e384a7, entries=150, sequenceid=94, filesize=11.7 K 2024-12-16T17:57:08,689 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~67.09 KB/68700, heapSize ~176.48 KB/180720, currentSize=140.89 KB/144270 for 7212a7dec92fa5781081695b56d809ad in 625ms, sequenceid=94, compaction requested=true 2024-12-16T17:57:08,689 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 7212a7dec92fa5781081695b56d809ad: 2024-12-16T17:57:08,689 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 7212a7dec92fa5781081695b56d809ad:A, priority=-2147483648, current under compaction store size is 1 2024-12-16T17:57:08,689 DEBUG [RS:0;3609ad07831c:39733-shortCompactions-0 {}] 
compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-16T17:57:08,689 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-16T17:57:08,689 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 7212a7dec92fa5781081695b56d809ad:B, priority=-2147483648, current under compaction store size is 2 2024-12-16T17:57:08,689 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-16T17:57:08,690 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 7212a7dec92fa5781081695b56d809ad:C, priority=-2147483648, current under compaction store size is 3 2024-12-16T17:57:08,690 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=2), splitQueue=0 2024-12-16T17:57:08,690 DEBUG [RS:0;3609ad07831c:39733-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-16T17:57:08,691 DEBUG [RS:0;3609ad07831c:39733-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 92968 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-16T17:57:08,691 DEBUG [RS:0;3609ad07831c:39733-shortCompactions-0 {}] regionserver.HStore(1540): 7212a7dec92fa5781081695b56d809ad/A is initiating minor compaction (all files) 2024-12-16T17:57:08,691 INFO [RS:0;3609ad07831c:39733-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 7212a7dec92fa5781081695b56d809ad/A in TestAcidGuarantees,,1734371821088.7212a7dec92fa5781081695b56d809ad. 2024-12-16T17:57:08,691 INFO [RS:0;3609ad07831c:39733-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/7212a7dec92fa5781081695b56d809ad/A/373c3802fb174c3f964f48d4def4af6f, hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/7212a7dec92fa5781081695b56d809ad/A/f67b9ee5e63546dbba0bb290afc45d7c, hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/7212a7dec92fa5781081695b56d809ad/A/6e4bd1593f7e4a5aa3d044ed97abfa43] into tmpdir=hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/7212a7dec92fa5781081695b56d809ad/.tmp, totalSize=90.8 K 2024-12-16T17:57:08,691 INFO [RS:0;3609ad07831c:39733-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(181): MOB compaction: major=false isAll=true priority=13 throughput controller=DefaultCompactionThroughputController [maxThroughput=50.00 MB/second, activeCompactions=0] table=TestAcidGuarantees cf=A region=TestAcidGuarantees,,1734371821088.7212a7dec92fa5781081695b56d809ad. 2024-12-16T17:57:08,691 DEBUG [RS:0;3609ad07831c:39733-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(191): MOB compaction table=TestAcidGuarantees cf=A region=TestAcidGuarantees,,1734371821088.7212a7dec92fa5781081695b56d809ad. 
files: [hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/7212a7dec92fa5781081695b56d809ad/A/373c3802fb174c3f964f48d4def4af6f, hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/7212a7dec92fa5781081695b56d809ad/A/f67b9ee5e63546dbba0bb290afc45d7c, hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/7212a7dec92fa5781081695b56d809ad/A/6e4bd1593f7e4a5aa3d044ed97abfa43] 2024-12-16T17:57:08,691 DEBUG [RS:0;3609ad07831c:39733-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 36106 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-16T17:57:08,691 DEBUG [RS:0;3609ad07831c:39733-shortCompactions-0 {}] compactions.Compactor(224): Compacting 373c3802fb174c3f964f48d4def4af6f, keycount=150, bloomtype=ROW, size=30.3 K, encoding=NONE, compression=NONE, seqNum=54, earliestPutTs=1734371825571 2024-12-16T17:57:08,691 DEBUG [RS:0;3609ad07831c:39733-longCompactions-0 {}] regionserver.HStore(1540): 7212a7dec92fa5781081695b56d809ad/B is initiating minor compaction (all files) 2024-12-16T17:57:08,692 INFO [RS:0;3609ad07831c:39733-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 7212a7dec92fa5781081695b56d809ad/B in TestAcidGuarantees,,1734371821088.7212a7dec92fa5781081695b56d809ad. 2024-12-16T17:57:08,692 INFO [RS:0;3609ad07831c:39733-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/7212a7dec92fa5781081695b56d809ad/B/193c1325600f4b45a0ffca178f4ec64c, hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/7212a7dec92fa5781081695b56d809ad/B/7cedf7ac9f484cf6aa4aed7bd21913b5, hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/7212a7dec92fa5781081695b56d809ad/B/dad3023d67d24c24b0fafbe3f107dcb8] into tmpdir=hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/7212a7dec92fa5781081695b56d809ad/.tmp, totalSize=35.3 K 2024-12-16T17:57:08,692 DEBUG [RS:0;3609ad07831c:39733-shortCompactions-0 {}] compactions.Compactor(224): Compacting f67b9ee5e63546dbba0bb290afc45d7c, keycount=150, bloomtype=ROW, size=30.2 K, encoding=NONE, compression=NONE, seqNum=79, earliestPutTs=1734371826800 2024-12-16T17:57:08,693 DEBUG [RS:0;3609ad07831c:39733-longCompactions-0 {}] compactions.Compactor(224): Compacting 193c1325600f4b45a0ffca178f4ec64c, keycount=150, bloomtype=ROW, size=11.8 K, encoding=NONE, compression=NONE, seqNum=54, earliestPutTs=1734371825571 2024-12-16T17:57:08,693 DEBUG [RS:0;3609ad07831c:39733-shortCompactions-0 {}] compactions.Compactor(224): Compacting 6e4bd1593f7e4a5aa3d044ed97abfa43, keycount=150, bloomtype=ROW, size=30.2 K, encoding=NONE, compression=NONE, seqNum=94, earliestPutTs=1734371827445 2024-12-16T17:57:08,694 DEBUG [RS:0;3609ad07831c:39733-longCompactions-0 {}] compactions.Compactor(224): Compacting 7cedf7ac9f484cf6aa4aed7bd21913b5, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=79, earliestPutTs=1734371826800 2024-12-16T17:57:08,695 DEBUG [RS:0;3609ad07831c:39733-longCompactions-0 {}] compactions.Compactor(224): Compacting 
dad3023d67d24c24b0fafbe3f107dcb8, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=94, earliestPutTs=1734371827445 2024-12-16T17:57:08,706 INFO [RS:0;3609ad07831c:39733-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 7212a7dec92fa5781081695b56d809ad#B#compaction#144 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-12-16T17:57:08,706 DEBUG [RS:0;3609ad07831c:39733-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/7212a7dec92fa5781081695b56d809ad/.tmp/B/d1d05544a6f04c359e7da403235cc9ab is 50, key is test_row_0/B:col10/1734371828062/Put/seqid=0 2024-12-16T17:57:08,708 INFO [RS:0;3609ad07831c:39733-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(322): Compact MOB=false optimized configured=false optimized enabled=false maximum MOB file size=1073741824 major=true store=[table=TestAcidGuarantees family=A region=7212a7dec92fa5781081695b56d809ad] 2024-12-16T17:57:08,716 DEBUG [RS:0;3609ad07831c:39733-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(626): New MOB writer created=d41d8cd98f00b204e9800998ecf8427e20241216a5c87f1953124ff7bc628f3fce4533c1_7212a7dec92fa5781081695b56d809ad store=[table=TestAcidGuarantees family=A region=7212a7dec92fa5781081695b56d809ad] 2024-12-16T17:57:08,718 DEBUG [RS:0;3609ad07831c:39733-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(647): Commit or abort size=0 mobCells=0 major=true file=d41d8cd98f00b204e9800998ecf8427e20241216a5c87f1953124ff7bc628f3fce4533c1_7212a7dec92fa5781081695b56d809ad, store=[table=TestAcidGuarantees family=A region=7212a7dec92fa5781081695b56d809ad] 2024-12-16T17:57:08,718 DEBUG [RS:0;3609ad07831c:39733-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(658): Aborting writer for hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241216a5c87f1953124ff7bc628f3fce4533c1_7212a7dec92fa5781081695b56d809ad because there are no MOB cells, store=[table=TestAcidGuarantees family=A region=7212a7dec92fa5781081695b56d809ad] 2024-12-16T17:57:08,726 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41817 is added to blk_1073741992_1168 (size=12207) 2024-12-16T17:57:08,735 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39733 {}] regionserver.HRegion(8581): Flush requested on 7212a7dec92fa5781081695b56d809ad 2024-12-16T17:57:08,737 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 7212a7dec92fa5781081695b56d809ad 3/3 column families, dataSize=147.60 KB heapSize=387.47 KB 2024-12-16T17:57:08,737 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 7212a7dec92fa5781081695b56d809ad, store=A 2024-12-16T17:57:08,737 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-16T17:57:08,737 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 7212a7dec92fa5781081695b56d809ad, store=B 2024-12-16T17:57:08,737 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-16T17:57:08,737 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 7212a7dec92fa5781081695b56d809ad, store=C 
2024-12-16T17:57:08,737 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-16T17:57:08,740 DEBUG [RS:0;3609ad07831c:39733-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/7212a7dec92fa5781081695b56d809ad/.tmp/B/d1d05544a6f04c359e7da403235cc9ab as hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/7212a7dec92fa5781081695b56d809ad/B/d1d05544a6f04c359e7da403235cc9ab 2024-12-16T17:57:08,746 INFO [RS:0;3609ad07831c:39733-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 7212a7dec92fa5781081695b56d809ad/B of 7212a7dec92fa5781081695b56d809ad into d1d05544a6f04c359e7da403235cc9ab(size=11.9 K), total size for store is 11.9 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-12-16T17:57:08,746 DEBUG [RS:0;3609ad07831c:39733-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 7212a7dec92fa5781081695b56d809ad: 2024-12-16T17:57:08,747 INFO [RS:0;3609ad07831c:39733-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1734371821088.7212a7dec92fa5781081695b56d809ad., storeName=7212a7dec92fa5781081695b56d809ad/B, priority=13, startTime=1734371828689; duration=0sec 2024-12-16T17:57:08,747 DEBUG [RS:0;3609ad07831c:39733-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-16T17:57:08,747 DEBUG [RS:0;3609ad07831c:39733-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 7212a7dec92fa5781081695b56d809ad:B 2024-12-16T17:57:08,747 DEBUG [RS:0;3609ad07831c:39733-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 2 store files, 0 compacting, 2 eligible, 16 blocking 2024-12-16T17:57:08,748 DEBUG [RS:0;3609ad07831c:39733-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 0 files of size 0 starting at candidate #-1 after considering 0 permutations with 0 in ratio 2024-12-16T17:57:08,748 DEBUG [RS:0;3609ad07831c:39733-longCompactions-0 {}] compactions.SortedCompactionPolicy(232): Not compacting files because we only have 0 files ready for compaction. Need 3 to initiate. 2024-12-16T17:57:08,749 DEBUG [RS:0;3609ad07831c:39733-longCompactions-0 {}] regionserver.CompactSplit(450): Not compacting TestAcidGuarantees,,1734371821088.7212a7dec92fa5781081695b56d809ad. because compaction request was cancelled 2024-12-16T17:57:08,749 DEBUG [RS:0;3609ad07831c:39733-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 7212a7dec92fa5781081695b56d809ad:C 2024-12-16T17:57:08,751 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39733 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7212a7dec92fa5781081695b56d809ad, server=3609ad07831c,39733,1734371789085 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-16T17:57:08,751 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39733 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7212a7dec92fa5781081695b56d809ad, server=3609ad07831c,39733,1734371789085 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-16T17:57:08,751 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39733 {}] ipc.CallRunner(138): callId: 57 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41612 deadline: 1734371888745, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7212a7dec92fa5781081695b56d809ad, server=3609ad07831c,39733,1734371789085 2024-12-16T17:57:08,751 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39733 {}] ipc.CallRunner(138): callId: 58 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41628 deadline: 1734371888745, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7212a7dec92fa5781081695b56d809ad, server=3609ad07831c,39733,1734371789085 2024-12-16T17:57:08,751 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39733 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7212a7dec92fa5781081695b56d809ad, server=3609ad07831c,39733,1734371789085 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-16T17:57:08,751 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39733 {}] ipc.CallRunner(138): callId: 56 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41642 deadline: 1734371888746, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7212a7dec92fa5781081695b56d809ad, server=3609ad07831c,39733,1734371789085 2024-12-16T17:57:08,753 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39733 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7212a7dec92fa5781081695b56d809ad, server=3609ad07831c,39733,1734371789085 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-16T17:57:08,754 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39733 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7212a7dec92fa5781081695b56d809ad, server=3609ad07831c,39733,1734371789085 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-16T17:57:08,754 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39733 {}] ipc.CallRunner(138): callId: 56 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41652 deadline: 1734371888751, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7212a7dec92fa5781081695b56d809ad, server=3609ad07831c,39733,1734371789085 2024-12-16T17:57:08,754 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39733 {}] ipc.CallRunner(138): callId: 56 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41586 deadline: 1734371888751, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7212a7dec92fa5781081695b56d809ad, server=3609ad07831c,39733,1734371789085 2024-12-16T17:57:08,765 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202412168797999db3a94280b90b1ad29b787fd0_7212a7dec92fa5781081695b56d809ad is 50, key is test_row_0/A:col10/1734371828101/Put/seqid=0 2024-12-16T17:57:08,770 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41817 is added to blk_1073741993_1169 (size=4469) 2024-12-16T17:57:08,781 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41817 is added to blk_1073741994_1170 (size=14594) 2024-12-16T17:57:08,810 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38367 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=43 2024-12-16T17:57:08,818 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 3609ad07831c,39733,1734371789085 2024-12-16T17:57:08,818 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=39733 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=44 2024-12-16T17:57:08,819 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-0 {event_type=RS_FLUSH_REGIONS, pid=44}] regionserver.FlushRegionCallable(51): Starting 
region operation on TestAcidGuarantees,,1734371821088.7212a7dec92fa5781081695b56d809ad. 2024-12-16T17:57:08,819 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-0 {event_type=RS_FLUSH_REGIONS, pid=44}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1734371821088.7212a7dec92fa5781081695b56d809ad. as already flushing 2024-12-16T17:57:08,819 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-0 {event_type=RS_FLUSH_REGIONS, pid=44}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1734371821088.7212a7dec92fa5781081695b56d809ad. 2024-12-16T17:57:08,819 ERROR [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-0 {event_type=RS_FLUSH_REGIONS, pid=44}] handler.RSProcedureHandler(58): pid=44 java.io.IOException: Unable to complete flush {ENCODED => 7212a7dec92fa5781081695b56d809ad, NAME => 'TestAcidGuarantees,,1734371821088.7212a7dec92fa5781081695b56d809ad.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-16T17:57:08,819 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-0 {event_type=RS_FLUSH_REGIONS, pid=44}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=44 java.io.IOException: Unable to complete flush {ENCODED => 7212a7dec92fa5781081695b56d809ad, NAME => 'TestAcidGuarantees,,1734371821088.7212a7dec92fa5781081695b56d809ad.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-16T17:57:08,820 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38367 {}] master.HMaster(4114): Remote procedure failed, pid=44 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 7212a7dec92fa5781081695b56d809ad, NAME => 'TestAcidGuarantees,,1734371821088.7212a7dec92fa5781081695b56d809ad.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 7212a7dec92fa5781081695b56d809ad, NAME => 'TestAcidGuarantees,,1734371821088.7212a7dec92fa5781081695b56d809ad.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-16T17:57:08,854 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39733 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7212a7dec92fa5781081695b56d809ad, server=3609ad07831c,39733,1734371789085 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-16T17:57:08,854 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39733 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7212a7dec92fa5781081695b56d809ad, server=3609ad07831c,39733,1734371789085 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-16T17:57:08,855 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39733 {}] ipc.CallRunner(138): callId: 60 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41628 deadline: 1734371888852, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7212a7dec92fa5781081695b56d809ad, server=3609ad07831c,39733,1734371789085 2024-12-16T17:57:08,855 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39733 {}] ipc.CallRunner(138): callId: 59 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41612 deadline: 1734371888852, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7212a7dec92fa5781081695b56d809ad, server=3609ad07831c,39733,1734371789085 2024-12-16T17:57:08,855 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39733 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7212a7dec92fa5781081695b56d809ad, server=3609ad07831c,39733,1734371789085 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-16T17:57:08,855 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39733 {}] ipc.CallRunner(138): callId: 58 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41642 deadline: 1734371888852, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7212a7dec92fa5781081695b56d809ad, server=3609ad07831c,39733,1734371789085 2024-12-16T17:57:08,855 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39733 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7212a7dec92fa5781081695b56d809ad, server=3609ad07831c,39733,1734371789085 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-16T17:57:08,856 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39733 {}] ipc.CallRunner(138): callId: 58 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41652 deadline: 1734371888855, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7212a7dec92fa5781081695b56d809ad, server=3609ad07831c,39733,1734371789085 2024-12-16T17:57:08,856 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39733 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7212a7dec92fa5781081695b56d809ad, server=3609ad07831c,39733,1734371789085 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-16T17:57:08,856 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39733 {}] ipc.CallRunner(138): callId: 58 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41586 deadline: 1734371888855, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7212a7dec92fa5781081695b56d809ad, server=3609ad07831c,39733,1734371789085 2024-12-16T17:57:08,971 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 3609ad07831c,39733,1734371789085 2024-12-16T17:57:08,971 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=39733 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=44 2024-12-16T17:57:08,971 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-1 {event_type=RS_FLUSH_REGIONS, pid=44}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1734371821088.7212a7dec92fa5781081695b56d809ad. 2024-12-16T17:57:08,972 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-1 {event_type=RS_FLUSH_REGIONS, pid=44}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1734371821088.7212a7dec92fa5781081695b56d809ad. as already flushing 2024-12-16T17:57:08,972 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-1 {event_type=RS_FLUSH_REGIONS, pid=44}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1734371821088.7212a7dec92fa5781081695b56d809ad. 2024-12-16T17:57:08,972 ERROR [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-1 {event_type=RS_FLUSH_REGIONS, pid=44}] handler.RSProcedureHandler(58): pid=44 java.io.IOException: Unable to complete flush {ENCODED => 7212a7dec92fa5781081695b56d809ad, NAME => 'TestAcidGuarantees,,1734371821088.7212a7dec92fa5781081695b56d809ad.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-16T17:57:08,972 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-1 {event_type=RS_FLUSH_REGIONS, pid=44}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=44 java.io.IOException: Unable to complete flush {ENCODED => 7212a7dec92fa5781081695b56d809ad, NAME => 'TestAcidGuarantees,,1734371821088.7212a7dec92fa5781081695b56d809ad.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-16T17:57:08,972 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38367 {}] master.HMaster(4114): Remote procedure failed, pid=44 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 7212a7dec92fa5781081695b56d809ad, NAME => 'TestAcidGuarantees,,1734371821088.7212a7dec92fa5781081695b56d809ad.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 7212a7dec92fa5781081695b56d809ad, NAME => 'TestAcidGuarantees,,1734371821088.7212a7dec92fa5781081695b56d809ad.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-16T17:57:09,056 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39733 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7212a7dec92fa5781081695b56d809ad, server=3609ad07831c,39733,1734371789085 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-16T17:57:09,056 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39733 {}] ipc.CallRunner(138): callId: 61 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41612 deadline: 1734371889055, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7212a7dec92fa5781081695b56d809ad, server=3609ad07831c,39733,1734371789085 2024-12-16T17:57:09,058 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39733 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7212a7dec92fa5781081695b56d809ad, server=3609ad07831c,39733,1734371789085 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-16T17:57:09,058 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39733 {}] ipc.CallRunner(138): callId: 60 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41642 deadline: 1734371889056, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7212a7dec92fa5781081695b56d809ad, server=3609ad07831c,39733,1734371789085 2024-12-16T17:57:09,059 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39733 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7212a7dec92fa5781081695b56d809ad, server=3609ad07831c,39733,1734371789085 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-16T17:57:09,059 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39733 {}] ipc.CallRunner(138): callId: 62 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41628 deadline: 1734371889056, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7212a7dec92fa5781081695b56d809ad, server=3609ad07831c,39733,1734371789085 2024-12-16T17:57:09,059 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39733 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7212a7dec92fa5781081695b56d809ad, server=3609ad07831c,39733,1734371789085 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-16T17:57:09,059 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39733 {}] ipc.CallRunner(138): callId: 60 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41652 deadline: 1734371889056, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7212a7dec92fa5781081695b56d809ad, server=3609ad07831c,39733,1734371789085 2024-12-16T17:57:09,060 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39733 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7212a7dec92fa5781081695b56d809ad, server=3609ad07831c,39733,1734371789085 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-16T17:57:09,060 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39733 {}] ipc.CallRunner(138): callId: 60 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41586 deadline: 1734371889057, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7212a7dec92fa5781081695b56d809ad, server=3609ad07831c,39733,1734371789085 2024-12-16T17:57:09,110 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38367 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=43 2024-12-16T17:57:09,124 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 3609ad07831c,39733,1734371789085 2024-12-16T17:57:09,124 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=39733 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=44 2024-12-16T17:57:09,124 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-2 {event_type=RS_FLUSH_REGIONS, pid=44}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1734371821088.7212a7dec92fa5781081695b56d809ad. 2024-12-16T17:57:09,124 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-2 {event_type=RS_FLUSH_REGIONS, pid=44}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1734371821088.7212a7dec92fa5781081695b56d809ad. as already flushing 2024-12-16T17:57:09,124 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-2 {event_type=RS_FLUSH_REGIONS, pid=44}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1734371821088.7212a7dec92fa5781081695b56d809ad. 2024-12-16T17:57:09,125 ERROR [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-2 {event_type=RS_FLUSH_REGIONS, pid=44}] handler.RSProcedureHandler(58): pid=44 java.io.IOException: Unable to complete flush {ENCODED => 7212a7dec92fa5781081695b56d809ad, NAME => 'TestAcidGuarantees,,1734371821088.7212a7dec92fa5781081695b56d809ad.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-16T17:57:09,125 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-2 {event_type=RS_FLUSH_REGIONS, pid=44}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=44 java.io.IOException: Unable to complete flush {ENCODED => 7212a7dec92fa5781081695b56d809ad, NAME => 'TestAcidGuarantees,,1734371821088.7212a7dec92fa5781081695b56d809ad.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-16T17:57:09,125 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38367 {}] master.HMaster(4114): Remote procedure failed, pid=44 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 7212a7dec92fa5781081695b56d809ad, NAME => 'TestAcidGuarantees,,1734371821088.7212a7dec92fa5781081695b56d809ad.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 7212a7dec92fa5781081695b56d809ad, NAME => 'TestAcidGuarantees,,1734371821088.7212a7dec92fa5781081695b56d809ad.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-16T17:57:09,171 INFO [RS:0;3609ad07831c:39733-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 7212a7dec92fa5781081695b56d809ad#A#compaction#145 average throughput is 0.05 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-12-16T17:57:09,171 DEBUG [RS:0;3609ad07831c:39733-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/7212a7dec92fa5781081695b56d809ad/.tmp/A/31fc5bdb99ac439ca2196ede353c8165 is 175, key is test_row_0/A:col10/1734371828062/Put/seqid=0 2024-12-16T17:57:09,176 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41817 is added to blk_1073741995_1171 (size=31161) 2024-12-16T17:57:09,182 DEBUG [MemStoreFlusher.0 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-16T17:57:09,186 INFO [MemStoreFlusher.0 {}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202412168797999db3a94280b90b1ad29b787fd0_7212a7dec92fa5781081695b56d809ad to hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202412168797999db3a94280b90b1ad29b787fd0_7212a7dec92fa5781081695b56d809ad 2024-12-16T17:57:09,187 DEBUG [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/7212a7dec92fa5781081695b56d809ad/.tmp/A/f0459809cdf14565a7b91b7cf26dbf34, store: [table=TestAcidGuarantees family=A region=7212a7dec92fa5781081695b56d809ad] 2024-12-16T17:57:09,188 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/7212a7dec92fa5781081695b56d809ad/.tmp/A/f0459809cdf14565a7b91b7cf26dbf34 is 175, key is test_row_0/A:col10/1734371828101/Put/seqid=0 2024-12-16T17:57:09,193 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41817 is added to blk_1073741996_1172 (size=39549) 2024-12-16T17:57:09,200 INFO [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=120, memsize=49.2 K, hasBloomFilter=true, into tmp file hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/7212a7dec92fa5781081695b56d809ad/.tmp/A/f0459809cdf14565a7b91b7cf26dbf34 2024-12-16T17:57:09,210 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/7212a7dec92fa5781081695b56d809ad/.tmp/B/1f2e2cd8de6d4ad8b79338878becbfe5 is 50, key is test_row_0/B:col10/1734371828101/Put/seqid=0 2024-12-16T17:57:09,250 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41817 is added to blk_1073741997_1173 (size=12001) 2024-12-16T17:57:09,250 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=49.20 KB at sequenceid=120 (bloomFilter=true), 
to=hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/7212a7dec92fa5781081695b56d809ad/.tmp/B/1f2e2cd8de6d4ad8b79338878becbfe5 2024-12-16T17:57:09,269 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/7212a7dec92fa5781081695b56d809ad/.tmp/C/0fb1db9cfcd2468085fb2e8ad058f470 is 50, key is test_row_0/C:col10/1734371828101/Put/seqid=0 2024-12-16T17:57:09,276 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 3609ad07831c,39733,1734371789085 2024-12-16T17:57:09,277 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=39733 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=44 2024-12-16T17:57:09,277 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-0 {event_type=RS_FLUSH_REGIONS, pid=44}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1734371821088.7212a7dec92fa5781081695b56d809ad. 2024-12-16T17:57:09,277 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-0 {event_type=RS_FLUSH_REGIONS, pid=44}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1734371821088.7212a7dec92fa5781081695b56d809ad. as already flushing 2024-12-16T17:57:09,277 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-0 {event_type=RS_FLUSH_REGIONS, pid=44}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1734371821088.7212a7dec92fa5781081695b56d809ad. 2024-12-16T17:57:09,277 ERROR [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-0 {event_type=RS_FLUSH_REGIONS, pid=44}] handler.RSProcedureHandler(58): pid=44 java.io.IOException: Unable to complete flush {ENCODED => 7212a7dec92fa5781081695b56d809ad, NAME => 'TestAcidGuarantees,,1734371821088.7212a7dec92fa5781081695b56d809ad.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-16T17:57:09,278 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-0 {event_type=RS_FLUSH_REGIONS, pid=44}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=44 java.io.IOException: Unable to complete flush {ENCODED => 7212a7dec92fa5781081695b56d809ad, NAME => 'TestAcidGuarantees,,1734371821088.7212a7dec92fa5781081695b56d809ad.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-16T17:57:09,278 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38367 {}] master.HMaster(4114): Remote procedure failed, pid=44 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 7212a7dec92fa5781081695b56d809ad, NAME => 'TestAcidGuarantees,,1734371821088.7212a7dec92fa5781081695b56d809ad.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 7212a7dec92fa5781081695b56d809ad, NAME => 'TestAcidGuarantees,,1734371821088.7212a7dec92fa5781081695b56d809ad.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-16T17:57:09,299 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41817 is added to blk_1073741998_1174 (size=12001) 2024-12-16T17:57:09,359 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39733 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7212a7dec92fa5781081695b56d809ad, server=3609ad07831c,39733,1734371789085 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-16T17:57:09,359 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39733 {}] ipc.CallRunner(138): callId: 63 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41612 deadline: 1734371889359, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7212a7dec92fa5781081695b56d809ad, server=3609ad07831c,39733,1734371789085 2024-12-16T17:57:09,360 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39733 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7212a7dec92fa5781081695b56d809ad, server=3609ad07831c,39733,1734371789085 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-16T17:57:09,361 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39733 {}] ipc.CallRunner(138): callId: 64 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41628 deadline: 1734371889360, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7212a7dec92fa5781081695b56d809ad, server=3609ad07831c,39733,1734371789085 2024-12-16T17:57:09,361 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39733 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7212a7dec92fa5781081695b56d809ad, server=3609ad07831c,39733,1734371789085 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-16T17:57:09,361 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39733 {}] ipc.CallRunner(138): callId: 62 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41642 deadline: 1734371889360, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7212a7dec92fa5781081695b56d809ad, server=3609ad07831c,39733,1734371789085 2024-12-16T17:57:09,362 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39733 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7212a7dec92fa5781081695b56d809ad, server=3609ad07831c,39733,1734371789085 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-16T17:57:09,363 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39733 {}] ipc.CallRunner(138): callId: 62 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41652 deadline: 1734371889361, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7212a7dec92fa5781081695b56d809ad, server=3609ad07831c,39733,1734371789085 2024-12-16T17:57:09,364 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39733 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7212a7dec92fa5781081695b56d809ad, server=3609ad07831c,39733,1734371789085 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-16T17:57:09,365 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39733 {}] ipc.CallRunner(138): callId: 62 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41586 deadline: 1734371889363, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7212a7dec92fa5781081695b56d809ad, server=3609ad07831c,39733,1734371789085 2024-12-16T17:57:09,431 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 3609ad07831c,39733,1734371789085 2024-12-16T17:57:09,431 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=39733 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=44 2024-12-16T17:57:09,431 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-1 {event_type=RS_FLUSH_REGIONS, pid=44}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1734371821088.7212a7dec92fa5781081695b56d809ad. 2024-12-16T17:57:09,432 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-1 {event_type=RS_FLUSH_REGIONS, pid=44}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1734371821088.7212a7dec92fa5781081695b56d809ad. as already flushing 2024-12-16T17:57:09,432 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-1 {event_type=RS_FLUSH_REGIONS, pid=44}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1734371821088.7212a7dec92fa5781081695b56d809ad. 2024-12-16T17:57:09,432 ERROR [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-1 {event_type=RS_FLUSH_REGIONS, pid=44}] handler.RSProcedureHandler(58): pid=44 java.io.IOException: Unable to complete flush {ENCODED => 7212a7dec92fa5781081695b56d809ad, NAME => 'TestAcidGuarantees,,1734371821088.7212a7dec92fa5781081695b56d809ad.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
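The RegionTooBusyException records above show the region server refusing Mutate calls because the per-region memstore has hit its blocking size (512.0 K in this run). A minimal sketch of how that limit is normally derived, using illustrative values rather than anything read from this test's configuration: HBase blocks updates once the memstore exceeds hbase.hregion.memstore.flush.size multiplied by hbase.hregion.memstore.block.multiplier.

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;

    public class MemstoreLimitSketch {
      public static void main(String[] args) {
        Configuration conf = HBaseConfiguration.create();
        // Assumed, test-scale values for illustration only: 128 KB flush size x 4
        // gives the 512 KB blocking limit reported as "Over memstore limit=512.0 K".
        conf.setLong("hbase.hregion.memstore.flush.size", 128L * 1024L);
        conf.setInt("hbase.hregion.memstore.block.multiplier", 4);
        long blockingLimit = conf.getLong("hbase.hregion.memstore.flush.size", 0L)
            * conf.getInt("hbase.hregion.memstore.block.multiplier", 4);
        // Writes to a region whose memstore exceeds this size are rejected with
        // RegionTooBusyException until a flush brings the memstore back down.
        System.out.println("Blocking memstore size: " + blockingLimit + " bytes");
      }
    }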
2024-12-16T17:57:09,432 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-1 {event_type=RS_FLUSH_REGIONS, pid=44}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=44 java.io.IOException: Unable to complete flush {ENCODED => 7212a7dec92fa5781081695b56d809ad, NAME => 'TestAcidGuarantees,,1734371821088.7212a7dec92fa5781081695b56d809ad.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-16T17:57:09,432 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38367 {}] master.HMaster(4114): Remote procedure failed, pid=44 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 7212a7dec92fa5781081695b56d809ad, NAME => 'TestAcidGuarantees,,1734371821088.7212a7dec92fa5781081695b56d809ad.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 7212a7dec92fa5781081695b56d809ad, NAME => 'TestAcidGuarantees,,1734371821088.7212a7dec92fa5781081695b56d809ad.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-16T17:57:09,583 DEBUG [RS:0;3609ad07831c:39733-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/7212a7dec92fa5781081695b56d809ad/.tmp/A/31fc5bdb99ac439ca2196ede353c8165 as hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/7212a7dec92fa5781081695b56d809ad/A/31fc5bdb99ac439ca2196ede353c8165 2024-12-16T17:57:09,584 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 3609ad07831c,39733,1734371789085 2024-12-16T17:57:09,584 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=39733 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=44 2024-12-16T17:57:09,584 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-2 {event_type=RS_FLUSH_REGIONS, pid=44}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1734371821088.7212a7dec92fa5781081695b56d809ad. 2024-12-16T17:57:09,584 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-2 {event_type=RS_FLUSH_REGIONS, pid=44}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1734371821088.7212a7dec92fa5781081695b56d809ad. as already flushing 2024-12-16T17:57:09,585 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-2 {event_type=RS_FLUSH_REGIONS, pid=44}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1734371821088.7212a7dec92fa5781081695b56d809ad. 2024-12-16T17:57:09,585 ERROR [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-2 {event_type=RS_FLUSH_REGIONS, pid=44}] handler.RSProcedureHandler(58): pid=44 java.io.IOException: Unable to complete flush {ENCODED => 7212a7dec92fa5781081695b56d809ad, NAME => 'TestAcidGuarantees,,1734371821088.7212a7dec92fa5781081695b56d809ad.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-16T17:57:09,585 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-2 {event_type=RS_FLUSH_REGIONS, pid=44}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=44 java.io.IOException: Unable to complete flush {ENCODED => 7212a7dec92fa5781081695b56d809ad, NAME => 'TestAcidGuarantees,,1734371821088.7212a7dec92fa5781081695b56d809ad.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-16T17:57:09,586 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38367 {}] master.HMaster(4114): Remote procedure failed, pid=44 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 7212a7dec92fa5781081695b56d809ad, NAME => 'TestAcidGuarantees,,1734371821088.7212a7dec92fa5781081695b56d809ad.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 7212a7dec92fa5781081695b56d809ad, NAME => 'TestAcidGuarantees,,1734371821088.7212a7dec92fa5781081695b56d809ad.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-16T17:57:09,596 INFO [RS:0;3609ad07831c:39733-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 7212a7dec92fa5781081695b56d809ad/A of 7212a7dec92fa5781081695b56d809ad into 31fc5bdb99ac439ca2196ede353c8165(size=30.4 K), total size for store is 30.4 K. 
This selection was in queue for 0sec, and took 0sec to execute. 2024-12-16T17:57:09,596 DEBUG [RS:0;3609ad07831c:39733-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 7212a7dec92fa5781081695b56d809ad: 2024-12-16T17:57:09,596 INFO [RS:0;3609ad07831c:39733-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1734371821088.7212a7dec92fa5781081695b56d809ad., storeName=7212a7dec92fa5781081695b56d809ad/A, priority=13, startTime=1734371828689; duration=0sec 2024-12-16T17:57:09,596 DEBUG [RS:0;3609ad07831c:39733-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-16T17:57:09,596 DEBUG [RS:0;3609ad07831c:39733-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 7212a7dec92fa5781081695b56d809ad:A 2024-12-16T17:57:09,611 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38367 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=43 2024-12-16T17:57:09,700 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=49.20 KB at sequenceid=120 (bloomFilter=true), to=hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/7212a7dec92fa5781081695b56d809ad/.tmp/C/0fb1db9cfcd2468085fb2e8ad058f470 2024-12-16T17:57:09,709 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/7212a7dec92fa5781081695b56d809ad/.tmp/A/f0459809cdf14565a7b91b7cf26dbf34 as hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/7212a7dec92fa5781081695b56d809ad/A/f0459809cdf14565a7b91b7cf26dbf34 2024-12-16T17:57:09,716 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/7212a7dec92fa5781081695b56d809ad/A/f0459809cdf14565a7b91b7cf26dbf34, entries=200, sequenceid=120, filesize=38.6 K 2024-12-16T17:57:09,717 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/7212a7dec92fa5781081695b56d809ad/.tmp/B/1f2e2cd8de6d4ad8b79338878becbfe5 as hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/7212a7dec92fa5781081695b56d809ad/B/1f2e2cd8de6d4ad8b79338878becbfe5 2024-12-16T17:57:09,722 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/7212a7dec92fa5781081695b56d809ad/B/1f2e2cd8de6d4ad8b79338878becbfe5, entries=150, sequenceid=120, filesize=11.7 K 2024-12-16T17:57:09,724 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/7212a7dec92fa5781081695b56d809ad/.tmp/C/0fb1db9cfcd2468085fb2e8ad058f470 as 
hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/7212a7dec92fa5781081695b56d809ad/C/0fb1db9cfcd2468085fb2e8ad058f470 2024-12-16T17:57:09,729 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/7212a7dec92fa5781081695b56d809ad/C/0fb1db9cfcd2468085fb2e8ad058f470, entries=150, sequenceid=120, filesize=11.7 K 2024-12-16T17:57:09,730 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~147.60 KB/151140, heapSize ~387.42 KB/396720, currentSize=53.67 KB/54960 for 7212a7dec92fa5781081695b56d809ad in 994ms, sequenceid=120, compaction requested=true 2024-12-16T17:57:09,730 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 7212a7dec92fa5781081695b56d809ad: 2024-12-16T17:57:09,730 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 7212a7dec92fa5781081695b56d809ad:A, priority=-2147483648, current under compaction store size is 1 2024-12-16T17:57:09,730 DEBUG [RS:0;3609ad07831c:39733-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 2 store files, 0 compacting, 2 eligible, 16 blocking 2024-12-16T17:57:09,730 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-16T17:57:09,730 DEBUG [RS:0;3609ad07831c:39733-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 2 store files, 0 compacting, 2 eligible, 16 blocking 2024-12-16T17:57:09,730 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 7212a7dec92fa5781081695b56d809ad:B, priority=-2147483648, current under compaction store size is 2 2024-12-16T17:57:09,730 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-16T17:57:09,731 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 7212a7dec92fa5781081695b56d809ad:C, priority=-2147483648, current under compaction store size is 3 2024-12-16T17:57:09,731 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-16T17:57:09,732 DEBUG [RS:0;3609ad07831c:39733-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 0 files of size 0 starting at candidate #-1 after considering 0 permutations with 0 in ratio 2024-12-16T17:57:09,733 DEBUG [RS:0;3609ad07831c:39733-longCompactions-0 {}] compactions.SortedCompactionPolicy(232): Not compacting files because we only have 0 files ready for compaction. Need 3 to initiate. 2024-12-16T17:57:09,733 DEBUG [RS:0;3609ad07831c:39733-longCompactions-0 {}] regionserver.CompactSplit(450): Not compacting TestAcidGuarantees,,1734371821088.7212a7dec92fa5781081695b56d809ad. 
because compaction request was cancelled 2024-12-16T17:57:09,733 DEBUG [RS:0;3609ad07831c:39733-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 7212a7dec92fa5781081695b56d809ad:B 2024-12-16T17:57:09,733 DEBUG [RS:0;3609ad07831c:39733-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-16T17:57:09,733 DEBUG [RS:0;3609ad07831c:39733-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 0 files of size 0 starting at candidate #-1 after considering 0 permutations with 0 in ratio 2024-12-16T17:57:09,733 DEBUG [RS:0;3609ad07831c:39733-shortCompactions-0 {}] compactions.SortedCompactionPolicy(232): Not compacting files because we only have 0 files ready for compaction. Need 3 to initiate. 2024-12-16T17:57:09,733 DEBUG [RS:0;3609ad07831c:39733-shortCompactions-0 {}] regionserver.CompactSplit(450): Not compacting TestAcidGuarantees,,1734371821088.7212a7dec92fa5781081695b56d809ad. because compaction request was cancelled 2024-12-16T17:57:09,733 DEBUG [RS:0;3609ad07831c:39733-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 7212a7dec92fa5781081695b56d809ad:A 2024-12-16T17:57:09,734 DEBUG [RS:0;3609ad07831c:39733-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 36141 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-16T17:57:09,735 DEBUG [RS:0;3609ad07831c:39733-longCompactions-0 {}] regionserver.HStore(1540): 7212a7dec92fa5781081695b56d809ad/C is initiating minor compaction (all files) 2024-12-16T17:57:09,735 INFO [RS:0;3609ad07831c:39733-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 7212a7dec92fa5781081695b56d809ad/C in TestAcidGuarantees,,1734371821088.7212a7dec92fa5781081695b56d809ad. 
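The compaction-policy records above ("Need 3 to initiate", followed by a selection of 3 files of size 36141) reflect the minimum store-file count for a minor compaction. A minimal sketch of the relevant setting, shown with HBase's default of 3 rather than a value taken from this test:

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;

    public class CompactionMinFilesSketch {
      public static void main(String[] args) {
        Configuration conf = HBaseConfiguration.create();
        // Minimum number of eligible store files before a minor compaction is
        // scheduled; 3 is the default and matches "Need 3 to initiate" in the log.
        conf.setInt("hbase.hstore.compaction.min", 3);
        System.out.println("compaction.min = " + conf.getInt("hbase.hstore.compaction.min", 3));
      }
    }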
2024-12-16T17:57:09,735 INFO [RS:0;3609ad07831c:39733-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/7212a7dec92fa5781081695b56d809ad/C/7f51f3735b1b43e88139f4816ffcd031, hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/7212a7dec92fa5781081695b56d809ad/C/0066b68d19ba49c2ab5b2ac5b7e384a7, hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/7212a7dec92fa5781081695b56d809ad/C/0fb1db9cfcd2468085fb2e8ad058f470] into tmpdir=hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/7212a7dec92fa5781081695b56d809ad/.tmp, totalSize=35.3 K 2024-12-16T17:57:09,736 DEBUG [RS:0;3609ad07831c:39733-longCompactions-0 {}] compactions.Compactor(224): Compacting 7f51f3735b1b43e88139f4816ffcd031, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=79, earliestPutTs=1734371826800 2024-12-16T17:57:09,736 DEBUG [RS:0;3609ad07831c:39733-longCompactions-0 {}] compactions.Compactor(224): Compacting 0066b68d19ba49c2ab5b2ac5b7e384a7, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=94, earliestPutTs=1734371827445 2024-12-16T17:57:09,736 DEBUG [RS:0;3609ad07831c:39733-longCompactions-0 {}] compactions.Compactor(224): Compacting 0fb1db9cfcd2468085fb2e8ad058f470, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=120, earliestPutTs=1734371828101 2024-12-16T17:57:09,738 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 3609ad07831c,39733,1734371789085 2024-12-16T17:57:09,739 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=39733 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=44 2024-12-16T17:57:09,740 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-0 {event_type=RS_FLUSH_REGIONS, pid=44}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1734371821088.7212a7dec92fa5781081695b56d809ad. 
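pid=44 above is a flush procedure that the master keeps re-dispatching to the region server while the region reports it is already flushing. As a hedged illustration only, not this test's code path, a flush of the same table can be requested from a client through the Admin API:

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;

    public class FlushRequestSketch {
      public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();
        try (Connection conn = ConnectionFactory.createConnection(conf);
             Admin admin = conn.getAdmin()) {
          // Request a flush of the table's regions; the server-side records above
          // show how such a request is handled while a flush is already running.
          admin.flush(TableName.valueOf("TestAcidGuarantees"));
        }
      }
    }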
2024-12-16T17:57:09,740 INFO [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-0 {event_type=RS_FLUSH_REGIONS, pid=44}] regionserver.HRegion(2837): Flushing 7212a7dec92fa5781081695b56d809ad 3/3 column families, dataSize=53.67 KB heapSize=141.38 KB 2024-12-16T17:57:09,740 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-0 {event_type=RS_FLUSH_REGIONS, pid=44}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 7212a7dec92fa5781081695b56d809ad, store=A 2024-12-16T17:57:09,740 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-0 {event_type=RS_FLUSH_REGIONS, pid=44}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-16T17:57:09,740 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-0 {event_type=RS_FLUSH_REGIONS, pid=44}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 7212a7dec92fa5781081695b56d809ad, store=B 2024-12-16T17:57:09,741 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-0 {event_type=RS_FLUSH_REGIONS, pid=44}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-16T17:57:09,741 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-0 {event_type=RS_FLUSH_REGIONS, pid=44}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 7212a7dec92fa5781081695b56d809ad, store=C 2024-12-16T17:57:09,741 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-0 {event_type=RS_FLUSH_REGIONS, pid=44}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-16T17:57:09,768 INFO [RS:0;3609ad07831c:39733-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 7212a7dec92fa5781081695b56d809ad#C#compaction#149 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-12-16T17:57:09,768 DEBUG [RS:0;3609ad07831c:39733-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/7212a7dec92fa5781081695b56d809ad/.tmp/C/31b6c1416a6d46108b4256276e1536d3 is 50, key is test_row_0/C:col10/1734371828101/Put/seqid=0 2024-12-16T17:57:09,769 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-0 {event_type=RS_FLUSH_REGIONS, pid=44}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202412162156aa640f214e0ea36a77a12b581c3a_7212a7dec92fa5781081695b56d809ad is 50, key is test_row_0/A:col10/1734371828739/Put/seqid=0 2024-12-16T17:57:09,786 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41817 is added to blk_1073741999_1175 (size=12254) 2024-12-16T17:57:09,787 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-0 {event_type=RS_FLUSH_REGIONS, pid=44}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-16T17:57:09,794 INFO [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-0 {event_type=RS_FLUSH_REGIONS, pid=44}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202412162156aa640f214e0ea36a77a12b581c3a_7212a7dec92fa5781081695b56d809ad to hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202412162156aa640f214e0ea36a77a12b581c3a_7212a7dec92fa5781081695b56d809ad 2024-12-16T17:57:09,796 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-0 {event_type=RS_FLUSH_REGIONS, pid=44}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/7212a7dec92fa5781081695b56d809ad/.tmp/A/dab6500209b4499cbfcc2a9c00c43941, store: [table=TestAcidGuarantees family=A region=7212a7dec92fa5781081695b56d809ad] 2024-12-16T17:57:09,796 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-0 {event_type=RS_FLUSH_REGIONS, pid=44}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/7212a7dec92fa5781081695b56d809ad/.tmp/A/dab6500209b4499cbfcc2a9c00c43941 is 175, key is test_row_0/A:col10/1734371828739/Put/seqid=0 2024-12-16T17:57:09,801 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41817 is added to blk_1073742000_1176 (size=12241) 2024-12-16T17:57:09,824 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41817 is added to blk_1073742001_1177 (size=31055) 2024-12-16T17:57:09,862 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39733 {}] regionserver.HRegion(8581): Flush requested on 7212a7dec92fa5781081695b56d809ad 2024-12-16T17:57:09,862 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1734371821088.7212a7dec92fa5781081695b56d809ad. 
as already flushing 2024-12-16T17:57:09,887 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39733 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7212a7dec92fa5781081695b56d809ad, server=3609ad07831c,39733,1734371789085 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-16T17:57:09,887 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39733 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7212a7dec92fa5781081695b56d809ad, server=3609ad07831c,39733,1734371789085 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-16T17:57:09,887 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39733 {}] ipc.CallRunner(138): callId: 67 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41652 deadline: 1734371889882, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7212a7dec92fa5781081695b56d809ad, server=3609ad07831c,39733,1734371789085 2024-12-16T17:57:09,887 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39733 {}] ipc.CallRunner(138): callId: 70 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41628 deadline: 1734371889884, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7212a7dec92fa5781081695b56d809ad, server=3609ad07831c,39733,1734371789085 2024-12-16T17:57:09,888 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39733 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7212a7dec92fa5781081695b56d809ad, server=3609ad07831c,39733,1734371789085 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-16T17:57:09,888 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39733 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7212a7dec92fa5781081695b56d809ad, server=3609ad07831c,39733,1734371789085 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-16T17:57:09,888 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39733 {}] ipc.CallRunner(138): callId: 68 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41586 deadline: 1734371889885, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7212a7dec92fa5781081695b56d809ad, server=3609ad07831c,39733,1734371789085 2024-12-16T17:57:09,888 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39733 {}] ipc.CallRunner(138): callId: 69 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41642 deadline: 1734371889886, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7212a7dec92fa5781081695b56d809ad, server=3609ad07831c,39733,1734371789085 2024-12-16T17:57:09,888 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39733 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7212a7dec92fa5781081695b56d809ad, server=3609ad07831c,39733,1734371789085 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-16T17:57:09,888 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39733 {}] ipc.CallRunner(138): callId: 71 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41612 deadline: 1734371889887, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7212a7dec92fa5781081695b56d809ad, server=3609ad07831c,39733,1734371789085 2024-12-16T17:57:09,990 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39733 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7212a7dec92fa5781081695b56d809ad, server=3609ad07831c,39733,1734371789085 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-16T17:57:09,991 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39733 {}] ipc.CallRunner(138): callId: 70 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41586 deadline: 1734371889989, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7212a7dec92fa5781081695b56d809ad, server=3609ad07831c,39733,1734371789085 2024-12-16T17:57:09,991 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39733 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7212a7dec92fa5781081695b56d809ad, server=3609ad07831c,39733,1734371789085 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-16T17:57:09,991 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39733 {}] ipc.CallRunner(138): callId: 73 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41612 deadline: 1734371889990, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7212a7dec92fa5781081695b56d809ad, server=3609ad07831c,39733,1734371789085 2024-12-16T17:57:09,991 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39733 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7212a7dec92fa5781081695b56d809ad, server=3609ad07831c,39733,1734371789085 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-16T17:57:09,992 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39733 {}] ipc.CallRunner(138): callId: 71 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41642 deadline: 1734371889990, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7212a7dec92fa5781081695b56d809ad, server=3609ad07831c,39733,1734371789085 2024-12-16T17:57:09,992 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39733 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7212a7dec92fa5781081695b56d809ad, server=3609ad07831c,39733,1734371789085 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-16T17:57:09,992 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39733 {}] ipc.CallRunner(138): callId: 69 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41652 deadline: 1734371889991, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7212a7dec92fa5781081695b56d809ad, server=3609ad07831c,39733,1734371789085 2024-12-16T17:57:09,996 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39733 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7212a7dec92fa5781081695b56d809ad, server=3609ad07831c,39733,1734371789085 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-16T17:57:09,997 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39733 {}] ipc.CallRunner(138): callId: 72 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41628 deadline: 1734371889991, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7212a7dec92fa5781081695b56d809ad, server=3609ad07831c,39733,1734371789085 2024-12-16T17:57:10,194 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39733 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7212a7dec92fa5781081695b56d809ad, server=3609ad07831c,39733,1734371789085 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-16T17:57:10,194 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39733 {}] ipc.CallRunner(138): callId: 72 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41586 deadline: 1734371890192, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7212a7dec92fa5781081695b56d809ad, server=3609ad07831c,39733,1734371789085 2024-12-16T17:57:10,194 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39733 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7212a7dec92fa5781081695b56d809ad, server=3609ad07831c,39733,1734371789085 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-16T17:57:10,194 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39733 {}] ipc.CallRunner(138): callId: 73 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41642 deadline: 1734371890193, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7212a7dec92fa5781081695b56d809ad, server=3609ad07831c,39733,1734371789085 2024-12-16T17:57:10,194 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39733 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7212a7dec92fa5781081695b56d809ad, server=3609ad07831c,39733,1734371789085 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-16T17:57:10,194 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39733 {}] ipc.CallRunner(138): callId: 75 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41612 deadline: 1734371890193, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7212a7dec92fa5781081695b56d809ad, server=3609ad07831c,39733,1734371789085 2024-12-16T17:57:10,194 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39733 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7212a7dec92fa5781081695b56d809ad, server=3609ad07831c,39733,1734371789085 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-16T17:57:10,195 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39733 {}] ipc.CallRunner(138): callId: 71 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41652 deadline: 1734371890193, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7212a7dec92fa5781081695b56d809ad, server=3609ad07831c,39733,1734371789085 2024-12-16T17:57:10,199 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39733 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7212a7dec92fa5781081695b56d809ad, server=3609ad07831c,39733,1734371789085 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-16T17:57:10,200 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39733 {}] ipc.CallRunner(138): callId: 74 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41628 deadline: 1734371890197, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7212a7dec92fa5781081695b56d809ad, server=3609ad07831c,39733,1734371789085 2024-12-16T17:57:10,209 DEBUG [RS:0;3609ad07831c:39733-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/7212a7dec92fa5781081695b56d809ad/.tmp/C/31b6c1416a6d46108b4256276e1536d3 as hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/7212a7dec92fa5781081695b56d809ad/C/31b6c1416a6d46108b4256276e1536d3 2024-12-16T17:57:10,215 INFO [RS:0;3609ad07831c:39733-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 7212a7dec92fa5781081695b56d809ad/C of 7212a7dec92fa5781081695b56d809ad into 31b6c1416a6d46108b4256276e1536d3(size=12.0 K), total size for store is 12.0 K. This selection was in queue for 0sec, and took 0sec to execute. 
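The repeated WARN/DEBUG pairs above show HRegion.checkResources rejecting Mutate RPCs with RegionTooBusyException while the region's memstore is over its blocking limit (512.0 K in this run; in HBase that threshold is normally hbase.hregion.memstore.flush.size multiplied by hbase.hregion.memstore.block.multiplier), with ipc.CallRunner reporting the same exception back for each callId until a flush drains the memstore. Below is a minimal client-side sketch of how a writer could tolerate that condition; it assumes the standard HBase client API and the TestAcidGuarantees table named in this log, and the retry count and backoff values are illustrative only, not taken from this test.

import java.io.IOException;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.RegionTooBusyException;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.util.Bytes;

public class BusyRegionPutSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    try (Connection conn = ConnectionFactory.createConnection(conf);
         Table table = conn.getTable(TableName.valueOf("TestAcidGuarantees"))) {
      Put put = new Put(Bytes.toBytes("test_row_0"));
      put.addColumn(Bytes.toBytes("A"), Bytes.toBytes("col10"), Bytes.toBytes("value"));
      int attempt = 0;
      while (true) {
        try {
          // Rejected with RegionTooBusyException while the memstore is over its blocking limit.
          table.put(put);
          break;
        } catch (IOException ioe) {
          // Depending on client retry settings the exception may arrive directly or wrapped,
          // so check the cause chain before deciding to back off and retry.
          boolean busy = ioe instanceof RegionTooBusyException
              || ioe.getCause() instanceof RegionTooBusyException;
          if (!busy || ++attempt >= 5) {
            throw ioe; // not a busy-region condition, or out of (illustrative) retries
          }
          Thread.sleep(200L * attempt); // give the pending flush time to drain the memstore
        }
      }
    }
  }
}

Whether a given retry succeeds depends on how quickly the in-flight flush tracked in the surrounding log lines brings the memstore back under the limit.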
2024-12-16T17:57:10,215 DEBUG [RS:0;3609ad07831c:39733-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 7212a7dec92fa5781081695b56d809ad: 2024-12-16T17:57:10,215 INFO [RS:0;3609ad07831c:39733-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1734371821088.7212a7dec92fa5781081695b56d809ad., storeName=7212a7dec92fa5781081695b56d809ad/C, priority=13, startTime=1734371829731; duration=0sec 2024-12-16T17:57:10,215 DEBUG [RS:0;3609ad07831c:39733-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-16T17:57:10,215 DEBUG [RS:0;3609ad07831c:39733-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 7212a7dec92fa5781081695b56d809ad:C 2024-12-16T17:57:10,225 INFO [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-0 {event_type=RS_FLUSH_REGIONS, pid=44}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=133, memsize=17.9 K, hasBloomFilter=true, into tmp file hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/7212a7dec92fa5781081695b56d809ad/.tmp/A/dab6500209b4499cbfcc2a9c00c43941 2024-12-16T17:57:10,245 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-0 {event_type=RS_FLUSH_REGIONS, pid=44}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/7212a7dec92fa5781081695b56d809ad/.tmp/B/437acd4259954da59a2bf09c87d14dea is 50, key is test_row_0/B:col10/1734371828739/Put/seqid=0 2024-12-16T17:57:10,253 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41817 is added to blk_1073742002_1178 (size=12101) 2024-12-16T17:57:10,498 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39733 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7212a7dec92fa5781081695b56d809ad, server=3609ad07831c,39733,1734371789085 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-16T17:57:10,498 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39733 {}] ipc.CallRunner(138): callId: 77 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41612 deadline: 1734371890497, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7212a7dec92fa5781081695b56d809ad, server=3609ad07831c,39733,1734371789085 2024-12-16T17:57:10,498 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39733 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7212a7dec92fa5781081695b56d809ad, server=3609ad07831c,39733,1734371789085 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-16T17:57:10,499 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39733 {}] ipc.CallRunner(138): callId: 74 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41586 deadline: 1734371890497, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7212a7dec92fa5781081695b56d809ad, server=3609ad07831c,39733,1734371789085 2024-12-16T17:57:10,499 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39733 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7212a7dec92fa5781081695b56d809ad, server=3609ad07831c,39733,1734371789085 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-16T17:57:10,499 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39733 {}] ipc.CallRunner(138): callId: 75 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41642 deadline: 1734371890498, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7212a7dec92fa5781081695b56d809ad, server=3609ad07831c,39733,1734371789085 2024-12-16T17:57:10,499 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39733 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7212a7dec92fa5781081695b56d809ad, server=3609ad07831c,39733,1734371789085 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-16T17:57:10,499 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39733 {}] ipc.CallRunner(138): callId: 73 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41652 deadline: 1734371890498, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7212a7dec92fa5781081695b56d809ad, server=3609ad07831c,39733,1734371789085 2024-12-16T17:57:10,502 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39733 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7212a7dec92fa5781081695b56d809ad, server=3609ad07831c,39733,1734371789085 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-16T17:57:10,502 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39733 {}] ipc.CallRunner(138): callId: 76 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41628 deadline: 1734371890502, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7212a7dec92fa5781081695b56d809ad, server=3609ad07831c,39733,1734371789085 2024-12-16T17:57:10,613 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38367 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=43 2024-12-16T17:57:10,654 INFO [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-0 {event_type=RS_FLUSH_REGIONS, pid=44}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=17.89 KB at sequenceid=133 (bloomFilter=true), to=hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/7212a7dec92fa5781081695b56d809ad/.tmp/B/437acd4259954da59a2bf09c87d14dea 2024-12-16T17:57:10,662 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-0 {event_type=RS_FLUSH_REGIONS, pid=44}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/7212a7dec92fa5781081695b56d809ad/.tmp/C/92bb9126b28b4121a60bd85f7a66fea0 is 50, key is test_row_0/C:col10/1734371828739/Put/seqid=0 2024-12-16T17:57:10,672 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41817 is added to blk_1073742003_1179 (size=12101) 2024-12-16T17:57:11,000 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39733 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7212a7dec92fa5781081695b56d809ad, server=3609ad07831c,39733,1734371789085 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-16T17:57:11,000 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39733 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7212a7dec92fa5781081695b56d809ad, server=3609ad07831c,39733,1734371789085 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-16T17:57:11,000 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39733 {}] ipc.CallRunner(138): callId: 79 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41612 deadline: 1734371891000, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7212a7dec92fa5781081695b56d809ad, server=3609ad07831c,39733,1734371789085 2024-12-16T17:57:11,000 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39733 {}] ipc.CallRunner(138): callId: 77 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41642 deadline: 1734371891000, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7212a7dec92fa5781081695b56d809ad, server=3609ad07831c,39733,1734371789085 2024-12-16T17:57:11,003 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39733 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7212a7dec92fa5781081695b56d809ad, server=3609ad07831c,39733,1734371789085 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-16T17:57:11,003 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39733 {}] ipc.CallRunner(138): callId: 76 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41586 deadline: 1734371891002, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7212a7dec92fa5781081695b56d809ad, server=3609ad07831c,39733,1734371789085 2024-12-16T17:57:11,005 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39733 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7212a7dec92fa5781081695b56d809ad, server=3609ad07831c,39733,1734371789085 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-16T17:57:11,005 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39733 {}] ipc.CallRunner(138): callId: 75 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41652 deadline: 1734371891005, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7212a7dec92fa5781081695b56d809ad, server=3609ad07831c,39733,1734371789085 2024-12-16T17:57:11,005 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39733 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7212a7dec92fa5781081695b56d809ad, server=3609ad07831c,39733,1734371789085 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-16T17:57:11,005 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39733 {}] ipc.CallRunner(138): callId: 78 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41628 deadline: 1734371891005, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7212a7dec92fa5781081695b56d809ad, server=3609ad07831c,39733,1734371789085 2024-12-16T17:57:11,073 INFO [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-0 {event_type=RS_FLUSH_REGIONS, pid=44}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=17.89 KB at sequenceid=133 (bloomFilter=true), to=hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/7212a7dec92fa5781081695b56d809ad/.tmp/C/92bb9126b28b4121a60bd85f7a66fea0 2024-12-16T17:57:11,079 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-0 {event_type=RS_FLUSH_REGIONS, pid=44}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/7212a7dec92fa5781081695b56d809ad/.tmp/A/dab6500209b4499cbfcc2a9c00c43941 as hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/7212a7dec92fa5781081695b56d809ad/A/dab6500209b4499cbfcc2a9c00c43941 2024-12-16T17:57:11,084 INFO [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-0 {event_type=RS_FLUSH_REGIONS, pid=44}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/7212a7dec92fa5781081695b56d809ad/A/dab6500209b4499cbfcc2a9c00c43941, entries=150, sequenceid=133, filesize=30.3 K 2024-12-16T17:57:11,085 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-0 {event_type=RS_FLUSH_REGIONS, pid=44}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/7212a7dec92fa5781081695b56d809ad/.tmp/B/437acd4259954da59a2bf09c87d14dea as hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/7212a7dec92fa5781081695b56d809ad/B/437acd4259954da59a2bf09c87d14dea 2024-12-16T17:57:11,090 INFO [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-0 {event_type=RS_FLUSH_REGIONS, pid=44}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/7212a7dec92fa5781081695b56d809ad/B/437acd4259954da59a2bf09c87d14dea, entries=150, sequenceid=133, filesize=11.8 K 2024-12-16T17:57:11,093 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-0 {event_type=RS_FLUSH_REGIONS, pid=44}] regionserver.HRegionFileSystem(442): Committing 
hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/7212a7dec92fa5781081695b56d809ad/.tmp/C/92bb9126b28b4121a60bd85f7a66fea0 as hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/7212a7dec92fa5781081695b56d809ad/C/92bb9126b28b4121a60bd85f7a66fea0 2024-12-16T17:57:11,099 INFO [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-0 {event_type=RS_FLUSH_REGIONS, pid=44}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/7212a7dec92fa5781081695b56d809ad/C/92bb9126b28b4121a60bd85f7a66fea0, entries=150, sequenceid=133, filesize=11.8 K 2024-12-16T17:57:11,100 INFO [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-0 {event_type=RS_FLUSH_REGIONS, pid=44}] regionserver.HRegion(3040): Finished flush of dataSize ~53.67 KB/54960, heapSize ~141.33 KB/144720, currentSize=147.60 KB/151140 for 7212a7dec92fa5781081695b56d809ad in 1360ms, sequenceid=133, compaction requested=true 2024-12-16T17:57:11,100 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-0 {event_type=RS_FLUSH_REGIONS, pid=44}] regionserver.HRegion(2538): Flush status journal for 7212a7dec92fa5781081695b56d809ad: 2024-12-16T17:57:11,100 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-0 {event_type=RS_FLUSH_REGIONS, pid=44}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1734371821088.7212a7dec92fa5781081695b56d809ad. 2024-12-16T17:57:11,100 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-0 {event_type=RS_FLUSH_REGIONS, pid=44}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=44 2024-12-16T17:57:11,101 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38367 {}] master.HMaster(4106): Remote procedure done, pid=44 2024-12-16T17:57:11,103 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=44, resume processing ppid=43 2024-12-16T17:57:11,104 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=44, ppid=43, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 2.5920 sec 2024-12-16T17:57:11,108 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=43, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=43, table=TestAcidGuarantees in 2.5990 sec 2024-12-16T17:57:12,007 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39733 {}] regionserver.HRegion(8581): Flush requested on 7212a7dec92fa5781081695b56d809ad 2024-12-16T17:57:12,007 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 7212a7dec92fa5781081695b56d809ad 3/3 column families, dataSize=154.31 KB heapSize=405.05 KB 2024-12-16T17:57:12,008 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 7212a7dec92fa5781081695b56d809ad, store=A 2024-12-16T17:57:12,008 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-16T17:57:12,008 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 7212a7dec92fa5781081695b56d809ad, store=B 2024-12-16T17:57:12,008 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-16T17:57:12,008 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 
7212a7dec92fa5781081695b56d809ad, store=C 2024-12-16T17:57:12,008 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-16T17:57:12,017 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241216bbd5fc7fead7434b9a8aab8173c80215_7212a7dec92fa5781081695b56d809ad is 50, key is test_row_0/A:col10/1734371829886/Put/seqid=0 2024-12-16T17:57:12,023 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41817 is added to blk_1073742004_1180 (size=14794) 2024-12-16T17:57:12,024 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39733 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7212a7dec92fa5781081695b56d809ad, server=3609ad07831c,39733,1734371789085 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-16T17:57:12,025 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39733 {}] ipc.CallRunner(138): callId: 79 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41586 deadline: 1734371892014, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7212a7dec92fa5781081695b56d809ad, server=3609ad07831c,39733,1734371789085 2024-12-16T17:57:12,025 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39733 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7212a7dec92fa5781081695b56d809ad, server=3609ad07831c,39733,1734371789085 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-16T17:57:12,025 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39733 {}] ipc.CallRunner(138): callId: 78 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41652 deadline: 1734371892016, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7212a7dec92fa5781081695b56d809ad, server=3609ad07831c,39733,1734371789085 2024-12-16T17:57:12,025 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39733 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7212a7dec92fa5781081695b56d809ad, server=3609ad07831c,39733,1734371789085 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-16T17:57:12,025 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39733 {}] ipc.CallRunner(138): callId: 83 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41612 deadline: 1734371892023, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7212a7dec92fa5781081695b56d809ad, server=3609ad07831c,39733,1734371789085 2024-12-16T17:57:12,027 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39733 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7212a7dec92fa5781081695b56d809ad, server=3609ad07831c,39733,1734371789085 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-16T17:57:12,027 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39733 {}] ipc.CallRunner(138): callId: 81 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41642 deadline: 1734371892025, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7212a7dec92fa5781081695b56d809ad, server=3609ad07831c,39733,1734371789085 2024-12-16T17:57:12,027 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39733 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7212a7dec92fa5781081695b56d809ad, server=3609ad07831c,39733,1734371789085 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-16T17:57:12,027 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39733 {}] ipc.CallRunner(138): callId: 83 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41628 deadline: 1734371892025, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7212a7dec92fa5781081695b56d809ad, server=3609ad07831c,39733,1734371789085 2024-12-16T17:57:12,127 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39733 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7212a7dec92fa5781081695b56d809ad, server=3609ad07831c,39733,1734371789085 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-16T17:57:12,127 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39733 {}] ipc.CallRunner(138): callId: 81 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41586 deadline: 1734371892126, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7212a7dec92fa5781081695b56d809ad, server=3609ad07831c,39733,1734371789085 2024-12-16T17:57:12,129 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39733 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7212a7dec92fa5781081695b56d809ad, server=3609ad07831c,39733,1734371789085 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-16T17:57:12,129 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39733 {}] ipc.CallRunner(138): callId: 85 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41612 deadline: 1734371892127, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7212a7dec92fa5781081695b56d809ad, server=3609ad07831c,39733,1734371789085 2024-12-16T17:57:12,129 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39733 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7212a7dec92fa5781081695b56d809ad, server=3609ad07831c,39733,1734371789085 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-16T17:57:12,129 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39733 {}] ipc.CallRunner(138): callId: 80 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41652 deadline: 1734371892127, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7212a7dec92fa5781081695b56d809ad, server=3609ad07831c,39733,1734371789085 2024-12-16T17:57:12,131 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39733 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7212a7dec92fa5781081695b56d809ad, server=3609ad07831c,39733,1734371789085 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-16T17:57:12,131 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39733 {}] ipc.CallRunner(138): callId: 83 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41642 deadline: 1734371892128, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7212a7dec92fa5781081695b56d809ad, server=3609ad07831c,39733,1734371789085 2024-12-16T17:57:12,131 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39733 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7212a7dec92fa5781081695b56d809ad, server=3609ad07831c,39733,1734371789085 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-16T17:57:12,132 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39733 {}] ipc.CallRunner(138): callId: 85 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41628 deadline: 1734371892128, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7212a7dec92fa5781081695b56d809ad, server=3609ad07831c,39733,1734371789085 2024-12-16T17:57:12,330 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39733 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7212a7dec92fa5781081695b56d809ad, server=3609ad07831c,39733,1734371789085 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-16T17:57:12,330 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39733 {}] ipc.CallRunner(138): callId: 83 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41586 deadline: 1734371892329, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7212a7dec92fa5781081695b56d809ad, server=3609ad07831c,39733,1734371789085 2024-12-16T17:57:12,333 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39733 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7212a7dec92fa5781081695b56d809ad, server=3609ad07831c,39733,1734371789085 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-16T17:57:12,333 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39733 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7212a7dec92fa5781081695b56d809ad, server=3609ad07831c,39733,1734371789085 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-16T17:57:12,333 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39733 {}] ipc.CallRunner(138): callId: 82 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41652 deadline: 1734371892331, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7212a7dec92fa5781081695b56d809ad, server=3609ad07831c,39733,1734371789085 2024-12-16T17:57:12,333 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39733 {}] ipc.CallRunner(138): callId: 87 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41612 deadline: 1734371892330, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7212a7dec92fa5781081695b56d809ad, server=3609ad07831c,39733,1734371789085 2024-12-16T17:57:12,333 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39733 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7212a7dec92fa5781081695b56d809ad, server=3609ad07831c,39733,1734371789085 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-16T17:57:12,333 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39733 {}] ipc.CallRunner(138): callId: 85 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41642 deadline: 1734371892332, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7212a7dec92fa5781081695b56d809ad, server=3609ad07831c,39733,1734371789085 2024-12-16T17:57:12,333 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39733 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7212a7dec92fa5781081695b56d809ad, server=3609ad07831c,39733,1734371789085 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-16T17:57:12,333 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39733 {}] ipc.CallRunner(138): callId: 87 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41628 deadline: 1734371892333, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7212a7dec92fa5781081695b56d809ad, server=3609ad07831c,39733,1734371789085 2024-12-16T17:57:12,424 DEBUG [MemStoreFlusher.0 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-16T17:57:12,429 INFO [MemStoreFlusher.0 {}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241216bbd5fc7fead7434b9a8aab8173c80215_7212a7dec92fa5781081695b56d809ad to hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241216bbd5fc7fead7434b9a8aab8173c80215_7212a7dec92fa5781081695b56d809ad 2024-12-16T17:57:12,431 DEBUG [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/7212a7dec92fa5781081695b56d809ad/.tmp/A/2fb859d67a9d4a89b091c05ad4da0a5d, store: [table=TestAcidGuarantees family=A region=7212a7dec92fa5781081695b56d809ad] 2024-12-16T17:57:12,431 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/7212a7dec92fa5781081695b56d809ad/.tmp/A/2fb859d67a9d4a89b091c05ad4da0a5d is 175, key is test_row_0/A:col10/1734371829886/Put/seqid=0 2024-12-16T17:57:12,439 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41817 is added to blk_1073742005_1181 (size=39749) 2024-12-16T17:57:12,613 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38367 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=43 2024-12-16T17:57:12,614 INFO [Thread-705 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 43 completed 2024-12-16T17:57:12,615 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38367 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-12-16T17:57:12,615 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38367 {}] procedure2.ProcedureExecutor(1098): Stored pid=45, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=45, table=TestAcidGuarantees 2024-12-16T17:57:12,616 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38367 {}] 
master.MasterRpcServices(1305): Checking to see if procedure is done pid=45 2024-12-16T17:57:12,616 INFO [PEWorker-3 {}] procedure.FlushTableProcedure(91): pid=45, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=45, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-12-16T17:57:12,617 INFO [PEWorker-3 {}] procedure.FlushTableProcedure(91): pid=45, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=45, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-12-16T17:57:12,617 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=46, ppid=45, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-12-16T17:57:12,633 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39733 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7212a7dec92fa5781081695b56d809ad, server=3609ad07831c,39733,1734371789085 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-16T17:57:12,634 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39733 {}] ipc.CallRunner(138): callId: 85 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41586 deadline: 1734371892632, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7212a7dec92fa5781081695b56d809ad, server=3609ad07831c,39733,1734371789085 2024-12-16T17:57:12,637 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39733 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7212a7dec92fa5781081695b56d809ad, server=3609ad07831c,39733,1734371789085 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-16T17:57:12,637 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39733 {}] ipc.CallRunner(138): callId: 89 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41628 deadline: 1734371892634, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7212a7dec92fa5781081695b56d809ad, server=3609ad07831c,39733,1734371789085 2024-12-16T17:57:12,637 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39733 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7212a7dec92fa5781081695b56d809ad, server=3609ad07831c,39733,1734371789085 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-16T17:57:12,637 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39733 {}] ipc.CallRunner(138): callId: 84 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41652 deadline: 1734371892635, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7212a7dec92fa5781081695b56d809ad, server=3609ad07831c,39733,1734371789085 2024-12-16T17:57:12,637 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39733 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7212a7dec92fa5781081695b56d809ad, server=3609ad07831c,39733,1734371789085 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-16T17:57:12,638 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39733 {}] ipc.CallRunner(138): callId: 87 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41642 deadline: 1734371892636, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7212a7dec92fa5781081695b56d809ad, server=3609ad07831c,39733,1734371789085 2024-12-16T17:57:12,640 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39733 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7212a7dec92fa5781081695b56d809ad, server=3609ad07831c,39733,1734371789085 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-16T17:57:12,640 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39733 {}] ipc.CallRunner(138): callId: 89 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41612 deadline: 1734371892636, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7212a7dec92fa5781081695b56d809ad, server=3609ad07831c,39733,1734371789085 2024-12-16T17:57:12,717 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38367 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=45 2024-12-16T17:57:12,768 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 3609ad07831c,39733,1734371789085 2024-12-16T17:57:12,769 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=39733 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=46 2024-12-16T17:57:12,769 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-1 {event_type=RS_FLUSH_REGIONS, pid=46}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1734371821088.7212a7dec92fa5781081695b56d809ad. 2024-12-16T17:57:12,769 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-1 {event_type=RS_FLUSH_REGIONS, pid=46}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1734371821088.7212a7dec92fa5781081695b56d809ad. as already flushing 2024-12-16T17:57:12,769 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-1 {event_type=RS_FLUSH_REGIONS, pid=46}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1734371821088.7212a7dec92fa5781081695b56d809ad. 2024-12-16T17:57:12,769 ERROR [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-1 {event_type=RS_FLUSH_REGIONS, pid=46}] handler.RSProcedureHandler(58): pid=46 java.io.IOException: Unable to complete flush {ENCODED => 7212a7dec92fa5781081695b56d809ad, NAME => 'TestAcidGuarantees,,1734371821088.7212a7dec92fa5781081695b56d809ad.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
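The RegionTooBusyException warnings above all come from HRegion.checkResources rejecting puts while the region's memstore sits over its blocking limit (512.0 K here; the limit is the region flush size multiplied by hbase.hregion.memstore.block.multiplier, and this test configures a deliberately small flush size so the limit is hit quickly). Below is a minimal client-side sketch, assuming a standard HBase 2.x client, an already-created Connection, and purely illustrative attempt counts and pauses; in practice the HBase client's own retry settings (hbase.client.retries.number, hbase.client.pause) usually absorb these rejections, and the exception may reach the caller wrapped in a retries-exhausted error rather than directly.

import java.io.IOException;
import org.apache.hadoop.hbase.RegionTooBusyException;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.util.Bytes;

public class TooBusyBackoffSketch {
  // Retry a single Put with exponential backoff when the region reports it is
  // over its memstore limit, as in the RegionTooBusyException entries above.
  // Table name mirrors the test; attempt count and pause values are illustrative.
  static void putWithBackoff(Connection conn) throws IOException, InterruptedException {
    Put put = new Put(Bytes.toBytes("test_row_0"));
    put.addColumn(Bytes.toBytes("A"), Bytes.toBytes("col10"), Bytes.toBytes("value"));
    try (Table table = conn.getTable(TableName.valueOf("TestAcidGuarantees"))) {
      long pauseMs = 100;                           // initial pause between attempts
      for (int attempt = 1; attempt <= 5; attempt++) {
        try {
          table.put(put);                           // may fail while the memstore is blocked
          return;                                   // write accepted
        } catch (RegionTooBusyException e) {
          if (attempt == 5) {
            throw e;                                // give up after the last attempt
          }
          Thread.sleep(pauseMs);                    // let the pending flush drain the memstore
          pauseMs *= 2;                             // exponential backoff
        }
      }
    }
  }
}

Backing off and letting MemStoreFlusher.0 finish (as it does at 17:57:12,950 below, after 943 ms) is preferable to tightening client deadlines, since the server clears the condition on its own once the flush completes.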
2024-12-16T17:57:12,769 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-1 {event_type=RS_FLUSH_REGIONS, pid=46}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=46 java.io.IOException: Unable to complete flush {ENCODED => 7212a7dec92fa5781081695b56d809ad, NAME => 'TestAcidGuarantees,,1734371821088.7212a7dec92fa5781081695b56d809ad.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-16T17:57:12,770 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38367 {}] master.HMaster(4114): Remote procedure failed, pid=46 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 7212a7dec92fa5781081695b56d809ad, NAME => 'TestAcidGuarantees,,1734371821088.7212a7dec92fa5781081695b56d809ad.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 7212a7dec92fa5781081695b56d809ad, NAME => 'TestAcidGuarantees,,1734371821088.7212a7dec92fa5781081695b56d809ad.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-16T17:57:12,840 INFO [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=160, memsize=51.4 K, hasBloomFilter=true, into tmp file hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/7212a7dec92fa5781081695b56d809ad/.tmp/A/2fb859d67a9d4a89b091c05ad4da0a5d 2024-12-16T17:57:12,851 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/7212a7dec92fa5781081695b56d809ad/.tmp/B/cbdbe7f032e44f119ab6148bf96a84a2 is 50, key is test_row_0/B:col10/1734371829886/Put/seqid=0 2024-12-16T17:57:12,879 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41817 is added to blk_1073742006_1182 (size=12151) 2024-12-16T17:57:12,880 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=51.44 KB at sequenceid=160 (bloomFilter=true), to=hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/7212a7dec92fa5781081695b56d809ad/.tmp/B/cbdbe7f032e44f119ab6148bf96a84a2 2024-12-16T17:57:12,906 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/7212a7dec92fa5781081695b56d809ad/.tmp/C/d1366337e91f48eaa953f421c2de5039 is 50, key is test_row_0/C:col10/1734371829886/Put/seqid=0 2024-12-16T17:57:12,917 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41817 is added to blk_1073742007_1183 (size=12151) 2024-12-16T17:57:12,918 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38367 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=45 2024-12-16T17:57:12,919 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=51.44 KB at sequenceid=160 (bloomFilter=true), to=hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/7212a7dec92fa5781081695b56d809ad/.tmp/C/d1366337e91f48eaa953f421c2de5039 2024-12-16T17:57:12,921 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 3609ad07831c,39733,1734371789085 2024-12-16T17:57:12,922 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=39733 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=46 2024-12-16T17:57:12,922 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-2 {event_type=RS_FLUSH_REGIONS, pid=46}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1734371821088.7212a7dec92fa5781081695b56d809ad. 2024-12-16T17:57:12,923 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-2 {event_type=RS_FLUSH_REGIONS, pid=46}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1734371821088.7212a7dec92fa5781081695b56d809ad. 
as already flushing 2024-12-16T17:57:12,923 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-2 {event_type=RS_FLUSH_REGIONS, pid=46}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1734371821088.7212a7dec92fa5781081695b56d809ad. 2024-12-16T17:57:12,923 ERROR [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-2 {event_type=RS_FLUSH_REGIONS, pid=46}] handler.RSProcedureHandler(58): pid=46 java.io.IOException: Unable to complete flush {ENCODED => 7212a7dec92fa5781081695b56d809ad, NAME => 'TestAcidGuarantees,,1734371821088.7212a7dec92fa5781081695b56d809ad.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-16T17:57:12,923 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-2 {event_type=RS_FLUSH_REGIONS, pid=46}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=46 java.io.IOException: Unable to complete flush {ENCODED => 7212a7dec92fa5781081695b56d809ad, NAME => 'TestAcidGuarantees,,1734371821088.7212a7dec92fa5781081695b56d809ad.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-16T17:57:12,924 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38367 {}] master.HMaster(4114): Remote procedure failed, pid=46 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 7212a7dec92fa5781081695b56d809ad, NAME => 'TestAcidGuarantees,,1734371821088.7212a7dec92fa5781081695b56d809ad.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] 
at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 7212a7dec92fa5781081695b56d809ad, NAME => 'TestAcidGuarantees,,1734371821088.7212a7dec92fa5781081695b56d809ad.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-16T17:57:12,930 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/7212a7dec92fa5781081695b56d809ad/.tmp/A/2fb859d67a9d4a89b091c05ad4da0a5d as hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/7212a7dec92fa5781081695b56d809ad/A/2fb859d67a9d4a89b091c05ad4da0a5d 2024-12-16T17:57:12,936 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/7212a7dec92fa5781081695b56d809ad/A/2fb859d67a9d4a89b091c05ad4da0a5d, entries=200, sequenceid=160, filesize=38.8 K 2024-12-16T17:57:12,937 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/7212a7dec92fa5781081695b56d809ad/.tmp/B/cbdbe7f032e44f119ab6148bf96a84a2 as hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/7212a7dec92fa5781081695b56d809ad/B/cbdbe7f032e44f119ab6148bf96a84a2 2024-12-16T17:57:12,944 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/7212a7dec92fa5781081695b56d809ad/B/cbdbe7f032e44f119ab6148bf96a84a2, entries=150, sequenceid=160, filesize=11.9 K 2024-12-16T17:57:12,944 DEBUG [MemStoreFlusher.0 {}] regionserver.StoreScanner(1000): StoreScanner already closing. 
There is no need to updateReaders 2024-12-16T17:57:12,945 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/7212a7dec92fa5781081695b56d809ad/.tmp/C/d1366337e91f48eaa953f421c2de5039 as hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/7212a7dec92fa5781081695b56d809ad/C/d1366337e91f48eaa953f421c2de5039 2024-12-16T17:57:12,949 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/7212a7dec92fa5781081695b56d809ad/C/d1366337e91f48eaa953f421c2de5039, entries=150, sequenceid=160, filesize=11.9 K 2024-12-16T17:57:12,950 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~154.31 KB/158010, heapSize ~405 KB/414720, currentSize=53.67 KB/54960 for 7212a7dec92fa5781081695b56d809ad in 943ms, sequenceid=160, compaction requested=true 2024-12-16T17:57:12,950 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 7212a7dec92fa5781081695b56d809ad: 2024-12-16T17:57:12,950 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 7212a7dec92fa5781081695b56d809ad:A, priority=-2147483648, current under compaction store size is 1 2024-12-16T17:57:12,950 DEBUG [RS:0;3609ad07831c:39733-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 4 store files, 0 compacting, 4 eligible, 16 blocking 2024-12-16T17:57:12,950 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-16T17:57:12,950 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 7212a7dec92fa5781081695b56d809ad:B, priority=-2147483648, current under compaction store size is 2 2024-12-16T17:57:12,950 DEBUG [RS:0;3609ad07831c:39733-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 4 store files, 0 compacting, 4 eligible, 16 blocking 2024-12-16T17:57:12,950 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-16T17:57:12,950 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 7212a7dec92fa5781081695b56d809ad:C, priority=-2147483648, current under compaction store size is 3 2024-12-16T17:57:12,950 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-16T17:57:12,951 DEBUG [RS:0;3609ad07831c:39733-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 4 files of size 141514 starting at candidate #0 after considering 3 permutations with 3 in ratio 2024-12-16T17:57:12,951 DEBUG [RS:0;3609ad07831c:39733-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 4 files of size 48460 starting at candidate #0 after considering 3 permutations with 3 in ratio 2024-12-16T17:57:12,952 DEBUG [RS:0;3609ad07831c:39733-longCompactions-0 {}] regionserver.HStore(1540): 
7212a7dec92fa5781081695b56d809ad/B is initiating minor compaction (all files) 2024-12-16T17:57:12,952 DEBUG [RS:0;3609ad07831c:39733-shortCompactions-0 {}] regionserver.HStore(1540): 7212a7dec92fa5781081695b56d809ad/A is initiating minor compaction (all files) 2024-12-16T17:57:12,952 INFO [RS:0;3609ad07831c:39733-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 7212a7dec92fa5781081695b56d809ad/A in TestAcidGuarantees,,1734371821088.7212a7dec92fa5781081695b56d809ad. 2024-12-16T17:57:12,952 INFO [RS:0;3609ad07831c:39733-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 7212a7dec92fa5781081695b56d809ad/B in TestAcidGuarantees,,1734371821088.7212a7dec92fa5781081695b56d809ad. 2024-12-16T17:57:12,952 INFO [RS:0;3609ad07831c:39733-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/7212a7dec92fa5781081695b56d809ad/A/31fc5bdb99ac439ca2196ede353c8165, hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/7212a7dec92fa5781081695b56d809ad/A/f0459809cdf14565a7b91b7cf26dbf34, hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/7212a7dec92fa5781081695b56d809ad/A/dab6500209b4499cbfcc2a9c00c43941, hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/7212a7dec92fa5781081695b56d809ad/A/2fb859d67a9d4a89b091c05ad4da0a5d] into tmpdir=hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/7212a7dec92fa5781081695b56d809ad/.tmp, totalSize=138.2 K 2024-12-16T17:57:12,952 INFO [RS:0;3609ad07831c:39733-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/7212a7dec92fa5781081695b56d809ad/B/d1d05544a6f04c359e7da403235cc9ab, hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/7212a7dec92fa5781081695b56d809ad/B/1f2e2cd8de6d4ad8b79338878becbfe5, hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/7212a7dec92fa5781081695b56d809ad/B/437acd4259954da59a2bf09c87d14dea, hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/7212a7dec92fa5781081695b56d809ad/B/cbdbe7f032e44f119ab6148bf96a84a2] into tmpdir=hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/7212a7dec92fa5781081695b56d809ad/.tmp, totalSize=47.3 K 2024-12-16T17:57:12,952 INFO [RS:0;3609ad07831c:39733-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(181): MOB compaction: major=false isAll=true priority=12 throughput controller=DefaultCompactionThroughputController [maxThroughput=50.00 MB/second, activeCompactions=0] table=TestAcidGuarantees cf=A region=TestAcidGuarantees,,1734371821088.7212a7dec92fa5781081695b56d809ad. 2024-12-16T17:57:12,952 DEBUG [RS:0;3609ad07831c:39733-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(191): MOB compaction table=TestAcidGuarantees cf=A region=TestAcidGuarantees,,1734371821088.7212a7dec92fa5781081695b56d809ad. 
files: [hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/7212a7dec92fa5781081695b56d809ad/A/31fc5bdb99ac439ca2196ede353c8165, hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/7212a7dec92fa5781081695b56d809ad/A/f0459809cdf14565a7b91b7cf26dbf34, hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/7212a7dec92fa5781081695b56d809ad/A/dab6500209b4499cbfcc2a9c00c43941, hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/7212a7dec92fa5781081695b56d809ad/A/2fb859d67a9d4a89b091c05ad4da0a5d] 2024-12-16T17:57:12,952 DEBUG [RS:0;3609ad07831c:39733-shortCompactions-0 {}] compactions.Compactor(224): Compacting 31fc5bdb99ac439ca2196ede353c8165, keycount=150, bloomtype=ROW, size=30.4 K, encoding=NONE, compression=NONE, seqNum=94, earliestPutTs=1734371827445 2024-12-16T17:57:12,952 DEBUG [RS:0;3609ad07831c:39733-longCompactions-0 {}] compactions.Compactor(224): Compacting d1d05544a6f04c359e7da403235cc9ab, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=94, earliestPutTs=1734371827445 2024-12-16T17:57:12,953 DEBUG [RS:0;3609ad07831c:39733-shortCompactions-0 {}] compactions.Compactor(224): Compacting f0459809cdf14565a7b91b7cf26dbf34, keycount=200, bloomtype=ROW, size=38.6 K, encoding=NONE, compression=NONE, seqNum=120, earliestPutTs=1734371828092 2024-12-16T17:57:12,953 DEBUG [RS:0;3609ad07831c:39733-longCompactions-0 {}] compactions.Compactor(224): Compacting 1f2e2cd8de6d4ad8b79338878becbfe5, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=120, earliestPutTs=1734371828101 2024-12-16T17:57:12,953 DEBUG [RS:0;3609ad07831c:39733-longCompactions-0 {}] compactions.Compactor(224): Compacting 437acd4259954da59a2bf09c87d14dea, keycount=150, bloomtype=ROW, size=11.8 K, encoding=NONE, compression=NONE, seqNum=133, earliestPutTs=1734371828739 2024-12-16T17:57:12,953 DEBUG [RS:0;3609ad07831c:39733-shortCompactions-0 {}] compactions.Compactor(224): Compacting dab6500209b4499cbfcc2a9c00c43941, keycount=150, bloomtype=ROW, size=30.3 K, encoding=NONE, compression=NONE, seqNum=133, earliestPutTs=1734371828739 2024-12-16T17:57:12,953 DEBUG [RS:0;3609ad07831c:39733-longCompactions-0 {}] compactions.Compactor(224): Compacting cbdbe7f032e44f119ab6148bf96a84a2, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=160, earliestPutTs=1734371829883 2024-12-16T17:57:12,954 DEBUG [RS:0;3609ad07831c:39733-shortCompactions-0 {}] compactions.Compactor(224): Compacting 2fb859d67a9d4a89b091c05ad4da0a5d, keycount=200, bloomtype=ROW, size=38.8 K, encoding=NONE, compression=NONE, seqNum=160, earliestPutTs=1734371829883 2024-12-16T17:57:12,963 INFO [RS:0;3609ad07831c:39733-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(322): Compact MOB=false optimized configured=false optimized enabled=false maximum MOB file size=1073741824 major=true store=[table=TestAcidGuarantees family=A region=7212a7dec92fa5781081695b56d809ad] 2024-12-16T17:57:12,964 INFO [RS:0;3609ad07831c:39733-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 7212a7dec92fa5781081695b56d809ad#B#compaction#156 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 
1 active operations remaining, total limit is 50.00 MB/second 2024-12-16T17:57:12,964 DEBUG [RS:0;3609ad07831c:39733-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/7212a7dec92fa5781081695b56d809ad/.tmp/B/72e838897fc941f8a2df3ef7e6d11c7a is 50, key is test_row_0/B:col10/1734371829886/Put/seqid=0 2024-12-16T17:57:12,967 DEBUG [RS:0;3609ad07831c:39733-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(626): New MOB writer created=d41d8cd98f00b204e9800998ecf8427e20241216727088670f174eb49004a5b60cd4ead2_7212a7dec92fa5781081695b56d809ad store=[table=TestAcidGuarantees family=A region=7212a7dec92fa5781081695b56d809ad] 2024-12-16T17:57:12,968 DEBUG [RS:0;3609ad07831c:39733-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(647): Commit or abort size=0 mobCells=0 major=true file=d41d8cd98f00b204e9800998ecf8427e20241216727088670f174eb49004a5b60cd4ead2_7212a7dec92fa5781081695b56d809ad, store=[table=TestAcidGuarantees family=A region=7212a7dec92fa5781081695b56d809ad] 2024-12-16T17:57:12,969 DEBUG [RS:0;3609ad07831c:39733-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(658): Aborting writer for hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241216727088670f174eb49004a5b60cd4ead2_7212a7dec92fa5781081695b56d809ad because there are no MOB cells, store=[table=TestAcidGuarantees family=A region=7212a7dec92fa5781081695b56d809ad] 2024-12-16T17:57:12,982 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41817 is added to blk_1073742008_1184 (size=12493) 2024-12-16T17:57:12,988 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41817 is added to blk_1073742009_1185 (size=4469) 2024-12-16T17:57:12,991 INFO [RS:0;3609ad07831c:39733-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 7212a7dec92fa5781081695b56d809ad#A#compaction#157 average throughput is 0.90 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-12-16T17:57:12,991 DEBUG [RS:0;3609ad07831c:39733-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/7212a7dec92fa5781081695b56d809ad/.tmp/A/cf4111b4a3cf4b11a022e238d987d481 is 175, key is test_row_0/A:col10/1734371829886/Put/seqid=0 2024-12-16T17:57:12,996 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41817 is added to blk_1073742010_1186 (size=31447) 2024-12-16T17:57:13,075 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 3609ad07831c,39733,1734371789085 2024-12-16T17:57:13,075 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=39733 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=46 2024-12-16T17:57:13,075 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-0 {event_type=RS_FLUSH_REGIONS, pid=46}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1734371821088.7212a7dec92fa5781081695b56d809ad. 
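Family A is the only store that goes through HMobStore, DefaultMobStoreFlusher and DefaultMobStoreCompactor in the entries above, while B and C use the plain DefaultStoreFlusher; that split comes from A being declared as a MOB-enabled column family. The sketch below shows how such a table could be declared and how a client-requested flush (the source of the FlushTableProcedure / FlushRegionProcedure entries, pid=43/45/46) is issued. It assumes the standard HBase 2.x Admin and descriptor-builder APIs and an existing Connection; the MOB threshold value is illustrative, not the test's actual setting.

import java.io.IOException;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.TableDescriptor;
import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
import org.apache.hadoop.hbase.util.Bytes;

public class MobTableFlushSketch {
  // Declare a table whose family A stores large cells as MOB files (so its
  // flushes and compactions take the HMobStore / DefaultMobStoreFlusher path
  // seen above), then ask the master to flush it, which is what drives the
  // FlushTableProcedure -> FlushRegionProcedure chain in the log.
  static void createAndFlush(Connection conn) throws IOException {
    TableName name = TableName.valueOf("TestAcidGuarantees");
    TableDescriptor desc = TableDescriptorBuilder.newBuilder(name)
        .setColumnFamily(ColumnFamilyDescriptorBuilder.newBuilder(Bytes.toBytes("A"))
            .setMobEnabled(true)         // route this family through the MOB store
            .setMobThreshold(100L)       // cells above 100 bytes become MOB cells (illustrative)
            .build())
        .setColumnFamily(ColumnFamilyDescriptorBuilder.of("B"))
        .setColumnFamily(ColumnFamilyDescriptorBuilder.of("C"))
        .build();
    try (Admin admin = conn.getAdmin()) {
      admin.createTable(desc);
      admin.flush(name);                 // request a memstore flush for every region of the table
    }
  }
}

When the flush request reaches a region that is already flushing, the region server logs "NOT flushing ... as already flushing" and fails the remote call with "Unable to complete flush", after which the master re-dispatches it; that is the repeating pid=46 pattern above and in the entries that follow.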
2024-12-16T17:57:13,076 INFO [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-0 {event_type=RS_FLUSH_REGIONS, pid=46}] regionserver.HRegion(2837): Flushing 7212a7dec92fa5781081695b56d809ad 3/3 column families, dataSize=53.67 KB heapSize=141.38 KB 2024-12-16T17:57:13,076 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-0 {event_type=RS_FLUSH_REGIONS, pid=46}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 7212a7dec92fa5781081695b56d809ad, store=A 2024-12-16T17:57:13,076 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-0 {event_type=RS_FLUSH_REGIONS, pid=46}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-16T17:57:13,076 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-0 {event_type=RS_FLUSH_REGIONS, pid=46}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 7212a7dec92fa5781081695b56d809ad, store=B 2024-12-16T17:57:13,076 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-0 {event_type=RS_FLUSH_REGIONS, pid=46}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-16T17:57:13,076 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-0 {event_type=RS_FLUSH_REGIONS, pid=46}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 7212a7dec92fa5781081695b56d809ad, store=C 2024-12-16T17:57:13,076 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-0 {event_type=RS_FLUSH_REGIONS, pid=46}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-16T17:57:13,083 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-0 {event_type=RS_FLUSH_REGIONS, pid=46}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e2024121694c907d4fba24bd6bd6a5a1931737de0_7212a7dec92fa5781081695b56d809ad is 50, key is test_row_0/A:col10/1734371832012/Put/seqid=0 2024-12-16T17:57:13,087 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41817 is added to blk_1073742011_1187 (size=12304) 2024-12-16T17:57:13,136 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39733 {}] regionserver.HRegion(8581): Flush requested on 7212a7dec92fa5781081695b56d809ad 2024-12-16T17:57:13,136 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1734371821088.7212a7dec92fa5781081695b56d809ad. as already flushing 2024-12-16T17:57:13,180 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39733 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7212a7dec92fa5781081695b56d809ad, server=3609ad07831c,39733,1734371789085 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-16T17:57:13,180 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39733 {}] ipc.CallRunner(138): callId: 95 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41628 deadline: 1734371893176, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7212a7dec92fa5781081695b56d809ad, server=3609ad07831c,39733,1734371789085 2024-12-16T17:57:13,181 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39733 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7212a7dec92fa5781081695b56d809ad, server=3609ad07831c,39733,1734371789085 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-16T17:57:13,181 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39733 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7212a7dec92fa5781081695b56d809ad, server=3609ad07831c,39733,1734371789085 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-16T17:57:13,181 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39733 {}] ipc.CallRunner(138): callId: 95 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41612 deadline: 1734371893177, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7212a7dec92fa5781081695b56d809ad, server=3609ad07831c,39733,1734371789085 2024-12-16T17:57:13,181 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39733 {}] ipc.CallRunner(138): callId: 94 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41642 deadline: 1734371893177, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7212a7dec92fa5781081695b56d809ad, server=3609ad07831c,39733,1734371789085 2024-12-16T17:57:13,181 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39733 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7212a7dec92fa5781081695b56d809ad, server=3609ad07831c,39733,1734371789085 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-16T17:57:13,181 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39733 {}] ipc.CallRunner(138): callId: 88 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41652 deadline: 1734371893178, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7212a7dec92fa5781081695b56d809ad, server=3609ad07831c,39733,1734371789085 2024-12-16T17:57:13,181 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39733 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7212a7dec92fa5781081695b56d809ad, server=3609ad07831c,39733,1734371789085 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-16T17:57:13,181 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39733 {}] ipc.CallRunner(138): callId: 94 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41586 deadline: 1734371893178, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7212a7dec92fa5781081695b56d809ad, server=3609ad07831c,39733,1734371789085 2024-12-16T17:57:13,219 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38367 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=45 2024-12-16T17:57:13,284 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39733 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7212a7dec92fa5781081695b56d809ad, server=3609ad07831c,39733,1734371789085 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-16T17:57:13,284 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39733 {}] ipc.CallRunner(138): callId: 97 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41628 deadline: 1734371893281, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7212a7dec92fa5781081695b56d809ad, server=3609ad07831c,39733,1734371789085 2024-12-16T17:57:13,284 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39733 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7212a7dec92fa5781081695b56d809ad, server=3609ad07831c,39733,1734371789085 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-16T17:57:13,284 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39733 {}] ipc.CallRunner(138): callId: 96 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41642 deadline: 1734371893282, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7212a7dec92fa5781081695b56d809ad, server=3609ad07831c,39733,1734371789085 2024-12-16T17:57:13,284 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39733 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7212a7dec92fa5781081695b56d809ad, server=3609ad07831c,39733,1734371789085 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-16T17:57:13,284 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39733 {}] ipc.CallRunner(138): callId: 97 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41612 deadline: 1734371893282, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7212a7dec92fa5781081695b56d809ad, server=3609ad07831c,39733,1734371789085 2024-12-16T17:57:13,285 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39733 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7212a7dec92fa5781081695b56d809ad, server=3609ad07831c,39733,1734371789085 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-16T17:57:13,285 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39733 {}] ipc.CallRunner(138): callId: 90 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41652 deadline: 1734371893282, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7212a7dec92fa5781081695b56d809ad, server=3609ad07831c,39733,1734371789085 2024-12-16T17:57:13,285 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39733 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7212a7dec92fa5781081695b56d809ad, server=3609ad07831c,39733,1734371789085 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-16T17:57:13,285 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39733 {}] ipc.CallRunner(138): callId: 96 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41586 deadline: 1734371893282, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7212a7dec92fa5781081695b56d809ad, server=3609ad07831c,39733,1734371789085 2024-12-16T17:57:13,391 DEBUG [RS:0;3609ad07831c:39733-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/7212a7dec92fa5781081695b56d809ad/.tmp/B/72e838897fc941f8a2df3ef7e6d11c7a as hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/7212a7dec92fa5781081695b56d809ad/B/72e838897fc941f8a2df3ef7e6d11c7a 2024-12-16T17:57:13,405 INFO [RS:0;3609ad07831c:39733-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 4 (all) file(s) in 7212a7dec92fa5781081695b56d809ad/B of 7212a7dec92fa5781081695b56d809ad into 72e838897fc941f8a2df3ef7e6d11c7a(size=12.2 K), total size for store is 12.2 K. This selection was in queue for 0sec, and took 0sec to execute. 
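The repeated org.apache.hadoop.hbase.RegionTooBusyException entries above are HRegion.checkResources rejecting mutations while region 7212a7dec92fa5781081695b56d809ad sits over its 512.0 K memstore blocking limit and waits for flushes to drain. The Java sketch below shows one way a writer could treat that condition as retryable with backoff; it is only an illustration — the table name and row key are taken from the log, but the connection setup, column values, retry count, and backoff figures are assumptions, and the stock HBase client performs its own retries, so the busy condition may arrive wrapped in another IOException.

    import java.io.IOException;

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.RegionTooBusyException;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;
    import org.apache.hadoop.hbase.client.Put;
    import org.apache.hadoop.hbase.client.Table;
    import org.apache.hadoop.hbase.util.Bytes;

    public class BusyRegionRetrySketch {
      public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();
        try (Connection conn = ConnectionFactory.createConnection(conf);
             Table table = conn.getTable(TableName.valueOf("TestAcidGuarantees"))) {
          Put put = new Put(Bytes.toBytes("test_row_0"));
          put.addColumn(Bytes.toBytes("A"), Bytes.toBytes("col10"), Bytes.toBytes("value"));
          long backoffMs = 100L;                       // illustrative starting backoff
          for (int attempt = 1; attempt <= 5; attempt++) {
            try {
              table.put(put);                          // rejected while the region is too busy
              break;                                   // write accepted
            } catch (IOException ioe) {
              if (!causedByRegionTooBusy(ioe)) {
                throw ioe;                             // some other failure, do not mask it
              }
              Thread.sleep(backoffMs);                 // let pending flushes drain the memstore
              backoffMs *= 2;                          // exponential backoff
            }
          }
        }
      }

      // Walk the cause chain, since the client may wrap RegionTooBusyException.
      private static boolean causedByRegionTooBusy(Throwable t) {
        for (Throwable cur = t; cur != null; cur = cur.getCause()) {
          if (cur instanceof RegionTooBusyException) {
            return true;
          }
        }
        return false;
      }
    }

In the log itself the clients simply resubmit, which is why the callId values keep climbing while the region remains over the limit.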
2024-12-16T17:57:13,405 DEBUG [RS:0;3609ad07831c:39733-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 7212a7dec92fa5781081695b56d809ad: 2024-12-16T17:57:13,405 INFO [RS:0;3609ad07831c:39733-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1734371821088.7212a7dec92fa5781081695b56d809ad., storeName=7212a7dec92fa5781081695b56d809ad/B, priority=12, startTime=1734371832950; duration=0sec 2024-12-16T17:57:13,405 DEBUG [RS:0;3609ad07831c:39733-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-16T17:57:13,405 DEBUG [RS:0;3609ad07831c:39733-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 7212a7dec92fa5781081695b56d809ad:B 2024-12-16T17:57:13,406 DEBUG [RS:0;3609ad07831c:39733-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-16T17:57:13,407 DEBUG [RS:0;3609ad07831c:39733-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/7212a7dec92fa5781081695b56d809ad/.tmp/A/cf4111b4a3cf4b11a022e238d987d481 as hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/7212a7dec92fa5781081695b56d809ad/A/cf4111b4a3cf4b11a022e238d987d481 2024-12-16T17:57:13,408 DEBUG [RS:0;3609ad07831c:39733-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 36493 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-16T17:57:13,408 DEBUG [RS:0;3609ad07831c:39733-longCompactions-0 {}] regionserver.HStore(1540): 7212a7dec92fa5781081695b56d809ad/C is initiating minor compaction (all files) 2024-12-16T17:57:13,409 INFO [RS:0;3609ad07831c:39733-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 7212a7dec92fa5781081695b56d809ad/C in TestAcidGuarantees,,1734371821088.7212a7dec92fa5781081695b56d809ad. 
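For reference, the 512.0 K figure in the RegionTooBusyException messages is the region's memstore blocking size, which HBase derives from the memstore flush size and the block multiplier; the test shrinks it far below the defaults through its own configuration, which is not shown in this log. The snippet below is only a sketch of the standard knobs — the property names are the stock HBase ones, while the values shown are the usual defaults, not the test's.

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;

    public class MemstoreBlockingLimitSketch {
      public static Configuration defaults() {
        Configuration conf = HBaseConfiguration.create();
        // Size at which a region's memstore is flushed to store files (default 128 MB).
        conf.setLong("hbase.hregion.memstore.flush.size", 128L * 1024 * 1024);
        // Writes are rejected with RegionTooBusyException once the memstore reaches
        // flush.size * block.multiplier (default multiplier 4, i.e. 512 MB per region).
        conf.setInt("hbase.hregion.memstore.block.multiplier", 4);
        return conf;
      }
    }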
2024-12-16T17:57:13,410 INFO [RS:0;3609ad07831c:39733-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/7212a7dec92fa5781081695b56d809ad/C/31b6c1416a6d46108b4256276e1536d3, hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/7212a7dec92fa5781081695b56d809ad/C/92bb9126b28b4121a60bd85f7a66fea0, hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/7212a7dec92fa5781081695b56d809ad/C/d1366337e91f48eaa953f421c2de5039] into tmpdir=hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/7212a7dec92fa5781081695b56d809ad/.tmp, totalSize=35.6 K 2024-12-16T17:57:13,410 DEBUG [RS:0;3609ad07831c:39733-longCompactions-0 {}] compactions.Compactor(224): Compacting 31b6c1416a6d46108b4256276e1536d3, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=120, earliestPutTs=1734371828101 2024-12-16T17:57:13,411 DEBUG [RS:0;3609ad07831c:39733-longCompactions-0 {}] compactions.Compactor(224): Compacting 92bb9126b28b4121a60bd85f7a66fea0, keycount=150, bloomtype=ROW, size=11.8 K, encoding=NONE, compression=NONE, seqNum=133, earliestPutTs=1734371828739 2024-12-16T17:57:13,411 DEBUG [RS:0;3609ad07831c:39733-longCompactions-0 {}] compactions.Compactor(224): Compacting d1366337e91f48eaa953f421c2de5039, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=160, earliestPutTs=1734371829883 2024-12-16T17:57:13,415 INFO [RS:0;3609ad07831c:39733-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 4 (all) file(s) in 7212a7dec92fa5781081695b56d809ad/A of 7212a7dec92fa5781081695b56d809ad into cf4111b4a3cf4b11a022e238d987d481(size=30.7 K), total size for store is 30.7 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-12-16T17:57:13,415 DEBUG [RS:0;3609ad07831c:39733-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 7212a7dec92fa5781081695b56d809ad: 2024-12-16T17:57:13,415 INFO [RS:0;3609ad07831c:39733-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1734371821088.7212a7dec92fa5781081695b56d809ad., storeName=7212a7dec92fa5781081695b56d809ad/A, priority=12, startTime=1734371832950; duration=0sec 2024-12-16T17:57:13,415 DEBUG [RS:0;3609ad07831c:39733-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-16T17:57:13,416 DEBUG [RS:0;3609ad07831c:39733-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 7212a7dec92fa5781081695b56d809ad:A 2024-12-16T17:57:13,427 INFO [RS:0;3609ad07831c:39733-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 7212a7dec92fa5781081695b56d809ad#C#compaction#159 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-12-16T17:57:13,428 DEBUG [RS:0;3609ad07831c:39733-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/7212a7dec92fa5781081695b56d809ad/.tmp/C/3c82b4c6bd0d4095b3214eda58dfbf88 is 50, key is test_row_0/C:col10/1734371829886/Put/seqid=0 2024-12-16T17:57:13,432 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41817 is added to blk_1073742012_1188 (size=12493) 2024-12-16T17:57:13,438 DEBUG [RS:0;3609ad07831c:39733-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/7212a7dec92fa5781081695b56d809ad/.tmp/C/3c82b4c6bd0d4095b3214eda58dfbf88 as hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/7212a7dec92fa5781081695b56d809ad/C/3c82b4c6bd0d4095b3214eda58dfbf88 2024-12-16T17:57:13,445 INFO [RS:0;3609ad07831c:39733-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 7212a7dec92fa5781081695b56d809ad/C of 7212a7dec92fa5781081695b56d809ad into 3c82b4c6bd0d4095b3214eda58dfbf88(size=12.2 K), total size for store is 12.2 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-12-16T17:57:13,445 DEBUG [RS:0;3609ad07831c:39733-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 7212a7dec92fa5781081695b56d809ad: 2024-12-16T17:57:13,445 INFO [RS:0;3609ad07831c:39733-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1734371821088.7212a7dec92fa5781081695b56d809ad., storeName=7212a7dec92fa5781081695b56d809ad/C, priority=13, startTime=1734371832950; duration=0sec 2024-12-16T17:57:13,445 DEBUG [RS:0;3609ad07831c:39733-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-16T17:57:13,446 DEBUG [RS:0;3609ad07831c:39733-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 7212a7dec92fa5781081695b56d809ad:C 2024-12-16T17:57:13,486 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39733 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7212a7dec92fa5781081695b56d809ad, server=3609ad07831c,39733,1734371789085 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-16T17:57:13,486 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39733 {}] ipc.CallRunner(138): callId: 98 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41642 deadline: 1734371893485, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7212a7dec92fa5781081695b56d809ad, server=3609ad07831c,39733,1734371789085 2024-12-16T17:57:13,486 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39733 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7212a7dec92fa5781081695b56d809ad, server=3609ad07831c,39733,1734371789085 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-16T17:57:13,487 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39733 {}] ipc.CallRunner(138): callId: 99 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41612 deadline: 1734371893485, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7212a7dec92fa5781081695b56d809ad, server=3609ad07831c,39733,1734371789085 2024-12-16T17:57:13,487 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39733 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7212a7dec92fa5781081695b56d809ad, server=3609ad07831c,39733,1734371789085 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-16T17:57:13,487 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39733 {}] ipc.CallRunner(138): callId: 92 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41652 deadline: 1734371893485, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7212a7dec92fa5781081695b56d809ad, server=3609ad07831c,39733,1734371789085 2024-12-16T17:57:13,487 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39733 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7212a7dec92fa5781081695b56d809ad, server=3609ad07831c,39733,1734371789085 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-16T17:57:13,487 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39733 {}] ipc.CallRunner(138): callId: 99 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41628 deadline: 1734371893486, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7212a7dec92fa5781081695b56d809ad, server=3609ad07831c,39733,1734371789085 2024-12-16T17:57:13,488 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-0 {event_type=RS_FLUSH_REGIONS, pid=46}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-16T17:57:13,491 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39733 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7212a7dec92fa5781081695b56d809ad, server=3609ad07831c,39733,1734371789085 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-16T17:57:13,492 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39733 {}] ipc.CallRunner(138): callId: 98 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41586 deadline: 1734371893488, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7212a7dec92fa5781081695b56d809ad, server=3609ad07831c,39733,1734371789085 2024-12-16T17:57:13,495 INFO [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-0 {event_type=RS_FLUSH_REGIONS, pid=46}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e2024121694c907d4fba24bd6bd6a5a1931737de0_7212a7dec92fa5781081695b56d809ad to hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e2024121694c907d4fba24bd6bd6a5a1931737de0_7212a7dec92fa5781081695b56d809ad 2024-12-16T17:57:13,501 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-0 {event_type=RS_FLUSH_REGIONS, pid=46}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/7212a7dec92fa5781081695b56d809ad/.tmp/A/ebcf8b6d96054995b0634e7192da808e, store: [table=TestAcidGuarantees family=A region=7212a7dec92fa5781081695b56d809ad] 2024-12-16T17:57:13,502 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-0 {event_type=RS_FLUSH_REGIONS, pid=46}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/7212a7dec92fa5781081695b56d809ad/.tmp/A/ebcf8b6d96054995b0634e7192da808e is 175, key is test_row_0/A:col10/1734371832012/Put/seqid=0 2024-12-16T17:57:13,522 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41817 is added to blk_1073742013_1189 (size=31105) 2024-12-16T17:57:13,524 INFO [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-0 {event_type=RS_FLUSH_REGIONS, pid=46}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=171, memsize=17.9 K, hasBloomFilter=true, into tmp file hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/7212a7dec92fa5781081695b56d809ad/.tmp/A/ebcf8b6d96054995b0634e7192da808e 2024-12-16T17:57:13,535 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-0 {event_type=RS_FLUSH_REGIONS, pid=46}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/7212a7dec92fa5781081695b56d809ad/.tmp/B/fa811f5b58e54cf592047382f86c8500 is 50, key is test_row_0/B:col10/1734371832012/Put/seqid=0 2024-12-16T17:57:13,582 INFO [Block report 
processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41817 is added to blk_1073742014_1190 (size=12151) 2024-12-16T17:57:13,720 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38367 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=45 2024-12-16T17:57:13,789 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39733 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7212a7dec92fa5781081695b56d809ad, server=3609ad07831c,39733,1734371789085 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-16T17:57:13,790 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39733 {}] ipc.CallRunner(138): callId: 100 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41642 deadline: 1734371893787, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7212a7dec92fa5781081695b56d809ad, server=3609ad07831c,39733,1734371789085 2024-12-16T17:57:13,790 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39733 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7212a7dec92fa5781081695b56d809ad, server=3609ad07831c,39733,1734371789085 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-16T17:57:13,790 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39733 {}] ipc.CallRunner(138): callId: 101 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41612 deadline: 1734371893788, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7212a7dec92fa5781081695b56d809ad, server=3609ad07831c,39733,1734371789085 2024-12-16T17:57:13,790 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39733 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7212a7dec92fa5781081695b56d809ad, server=3609ad07831c,39733,1734371789085 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-16T17:57:13,790 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39733 {}] ipc.CallRunner(138): callId: 101 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41628 deadline: 1734371893789, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7212a7dec92fa5781081695b56d809ad, server=3609ad07831c,39733,1734371789085 2024-12-16T17:57:13,790 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39733 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7212a7dec92fa5781081695b56d809ad, server=3609ad07831c,39733,1734371789085 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-16T17:57:13,791 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39733 {}] ipc.CallRunner(138): callId: 94 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41652 deadline: 1734371893790, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7212a7dec92fa5781081695b56d809ad, server=3609ad07831c,39733,1734371789085 2024-12-16T17:57:13,794 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39733 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7212a7dec92fa5781081695b56d809ad, server=3609ad07831c,39733,1734371789085 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-16T17:57:13,794 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39733 {}] ipc.CallRunner(138): callId: 100 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41586 deadline: 1734371893793, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7212a7dec92fa5781081695b56d809ad, server=3609ad07831c,39733,1734371789085 2024-12-16T17:57:13,983 INFO [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-0 {event_type=RS_FLUSH_REGIONS, pid=46}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=17.89 KB at sequenceid=171 (bloomFilter=true), to=hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/7212a7dec92fa5781081695b56d809ad/.tmp/B/fa811f5b58e54cf592047382f86c8500 2024-12-16T17:57:13,992 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-0 {event_type=RS_FLUSH_REGIONS, pid=46}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/7212a7dec92fa5781081695b56d809ad/.tmp/C/1cba54bfa6a7486d894a85c13f4598f3 is 50, key is test_row_0/C:col10/1734371832012/Put/seqid=0 2024-12-16T17:57:14,002 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41817 is added to blk_1073742015_1191 (size=12151) 2024-12-16T17:57:14,002 INFO [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-0 {event_type=RS_FLUSH_REGIONS, pid=46}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=17.89 KB at sequenceid=171 (bloomFilter=true), to=hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/7212a7dec92fa5781081695b56d809ad/.tmp/C/1cba54bfa6a7486d894a85c13f4598f3 2024-12-16T17:57:14,008 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-0 {event_type=RS_FLUSH_REGIONS, pid=46}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/7212a7dec92fa5781081695b56d809ad/.tmp/A/ebcf8b6d96054995b0634e7192da808e as hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/7212a7dec92fa5781081695b56d809ad/A/ebcf8b6d96054995b0634e7192da808e 2024-12-16T17:57:14,017 INFO [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-0 {event_type=RS_FLUSH_REGIONS, pid=46}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/7212a7dec92fa5781081695b56d809ad/A/ebcf8b6d96054995b0634e7192da808e, entries=150, sequenceid=171, filesize=30.4 K 2024-12-16T17:57:14,019 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-0 {event_type=RS_FLUSH_REGIONS, pid=46}] regionserver.HRegionFileSystem(442): Committing 
hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/7212a7dec92fa5781081695b56d809ad/.tmp/B/fa811f5b58e54cf592047382f86c8500 as hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/7212a7dec92fa5781081695b56d809ad/B/fa811f5b58e54cf592047382f86c8500 2024-12-16T17:57:14,023 INFO [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-0 {event_type=RS_FLUSH_REGIONS, pid=46}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/7212a7dec92fa5781081695b56d809ad/B/fa811f5b58e54cf592047382f86c8500, entries=150, sequenceid=171, filesize=11.9 K 2024-12-16T17:57:14,024 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-0 {event_type=RS_FLUSH_REGIONS, pid=46}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/7212a7dec92fa5781081695b56d809ad/.tmp/C/1cba54bfa6a7486d894a85c13f4598f3 as hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/7212a7dec92fa5781081695b56d809ad/C/1cba54bfa6a7486d894a85c13f4598f3 2024-12-16T17:57:14,041 INFO [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-0 {event_type=RS_FLUSH_REGIONS, pid=46}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/7212a7dec92fa5781081695b56d809ad/C/1cba54bfa6a7486d894a85c13f4598f3, entries=150, sequenceid=171, filesize=11.9 K 2024-12-16T17:57:14,044 INFO [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-0 {event_type=RS_FLUSH_REGIONS, pid=46}] regionserver.HRegion(3040): Finished flush of dataSize ~53.67 KB/54960, heapSize ~141.33 KB/144720, currentSize=147.60 KB/151140 for 7212a7dec92fa5781081695b56d809ad in 968ms, sequenceid=171, compaction requested=false 2024-12-16T17:57:14,044 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-0 {event_type=RS_FLUSH_REGIONS, pid=46}] regionserver.HRegion(2538): Flush status journal for 7212a7dec92fa5781081695b56d809ad: 2024-12-16T17:57:14,044 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-0 {event_type=RS_FLUSH_REGIONS, pid=46}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1734371821088.7212a7dec92fa5781081695b56d809ad. 
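The flush that just completed for 7212a7dec92fa5781081695b56d809ad ran as the flush-region task pid=46 under the master's table-flush procedure pid=45. The same flush, and the kind of compactions seen earlier, can also be requested explicitly through the client Admin API; the sketch below is illustrative only — the table name comes from the log, the connection setup is an assumption, and the compaction calls are asynchronous requests that the region server works through in the background.

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;

    public class FlushAndCompactSketch {
      public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();
        try (Connection conn = ConnectionFactory.createConnection(conf);
             Admin admin = conn.getAdmin()) {
          TableName table = TableName.valueOf("TestAcidGuarantees");
          admin.flush(table);         // flush the table's memstores to new store files
          admin.compact(table);       // ask for a minor compaction of those store files
          admin.majorCompact(table);  // or rewrite every store file in a major compaction
        }
      }
    }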
2024-12-16T17:57:14,044 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-0 {event_type=RS_FLUSH_REGIONS, pid=46}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=46 2024-12-16T17:57:14,044 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38367 {}] master.HMaster(4106): Remote procedure done, pid=46 2024-12-16T17:57:14,049 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=46, resume processing ppid=45 2024-12-16T17:57:14,049 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=46, ppid=45, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 1.4290 sec 2024-12-16T17:57:14,051 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=45, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=45, table=TestAcidGuarantees in 1.4350 sec 2024-12-16T17:57:14,293 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39733 {}] regionserver.HRegion(8581): Flush requested on 7212a7dec92fa5781081695b56d809ad 2024-12-16T17:57:14,293 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 7212a7dec92fa5781081695b56d809ad 3/3 column families, dataSize=154.31 KB heapSize=405.05 KB 2024-12-16T17:57:14,294 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 7212a7dec92fa5781081695b56d809ad, store=A 2024-12-16T17:57:14,294 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-16T17:57:14,294 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 7212a7dec92fa5781081695b56d809ad, store=B 2024-12-16T17:57:14,295 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-16T17:57:14,295 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 7212a7dec92fa5781081695b56d809ad, store=C 2024-12-16T17:57:14,295 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-16T17:57:14,315 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241216cabb03eacfe94d4e9f2b12485c728d17_7212a7dec92fa5781081695b56d809ad is 50, key is test_row_0/A:col10/1734371833176/Put/seqid=0 2024-12-16T17:57:14,320 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41817 is added to blk_1073742016_1192 (size=12304) 2024-12-16T17:57:14,324 DEBUG [MemStoreFlusher.0 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-16T17:57:14,328 INFO [MemStoreFlusher.0 {}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241216cabb03eacfe94d4e9f2b12485c728d17_7212a7dec92fa5781081695b56d809ad to hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241216cabb03eacfe94d4e9f2b12485c728d17_7212a7dec92fa5781081695b56d809ad 2024-12-16T17:57:14,329 DEBUG [MemStoreFlusher.0 {}] 
mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/7212a7dec92fa5781081695b56d809ad/.tmp/A/ba02e84dc04245adb2c3df3c4b69e98a, store: [table=TestAcidGuarantees family=A region=7212a7dec92fa5781081695b56d809ad] 2024-12-16T17:57:14,330 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/7212a7dec92fa5781081695b56d809ad/.tmp/A/ba02e84dc04245adb2c3df3c4b69e98a is 175, key is test_row_0/A:col10/1734371833176/Put/seqid=0 2024-12-16T17:57:14,335 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39733 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7212a7dec92fa5781081695b56d809ad, server=3609ad07831c,39733,1734371789085 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-16T17:57:14,335 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39733 {}] ipc.CallRunner(138): callId: 105 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41612 deadline: 1734371894331, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7212a7dec92fa5781081695b56d809ad, server=3609ad07831c,39733,1734371789085 2024-12-16T17:57:14,335 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39733 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7212a7dec92fa5781081695b56d809ad, server=3609ad07831c,39733,1734371789085 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-16T17:57:14,336 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39733 {}] ipc.CallRunner(138): callId: 105 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41628 deadline: 1734371894331, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7212a7dec92fa5781081695b56d809ad, server=3609ad07831c,39733,1734371789085 2024-12-16T17:57:14,336 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39733 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7212a7dec92fa5781081695b56d809ad, server=3609ad07831c,39733,1734371789085 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-16T17:57:14,336 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39733 {}] ipc.CallRunner(138): callId: 103 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41642 deadline: 1734371894332, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7212a7dec92fa5781081695b56d809ad, server=3609ad07831c,39733,1734371789085 2024-12-16T17:57:14,338 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39733 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7212a7dec92fa5781081695b56d809ad, server=3609ad07831c,39733,1734371789085 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-16T17:57:14,339 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39733 {}] ipc.CallRunner(138): callId: 103 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41586 deadline: 1734371894334, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7212a7dec92fa5781081695b56d809ad, server=3609ad07831c,39733,1734371789085 2024-12-16T17:57:14,339 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39733 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7212a7dec92fa5781081695b56d809ad, server=3609ad07831c,39733,1734371789085 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-16T17:57:14,340 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39733 {}] ipc.CallRunner(138): callId: 98 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41652 deadline: 1734371894335, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7212a7dec92fa5781081695b56d809ad, server=3609ad07831c,39733,1734371789085 2024-12-16T17:57:14,341 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41817 is added to blk_1073742017_1193 (size=31105) 2024-12-16T17:57:14,345 INFO [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=201, memsize=53.7 K, hasBloomFilter=true, into tmp file hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/7212a7dec92fa5781081695b56d809ad/.tmp/A/ba02e84dc04245adb2c3df3c4b69e98a 2024-12-16T17:57:14,358 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/7212a7dec92fa5781081695b56d809ad/.tmp/B/2892322bc4f64337b29f7d0743eb2980 is 50, key is test_row_0/B:col10/1734371833176/Put/seqid=0 2024-12-16T17:57:14,381 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41817 is added to blk_1073742018_1194 (size=12151) 2024-12-16T17:57:14,382 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=53.67 KB at sequenceid=201 (bloomFilter=true), to=hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/7212a7dec92fa5781081695b56d809ad/.tmp/B/2892322bc4f64337b29f7d0743eb2980 2024-12-16T17:57:14,399 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/7212a7dec92fa5781081695b56d809ad/.tmp/C/e65e8ebdfc614858b13553354a4f07b7 is 50, key is test_row_0/C:col10/1734371833176/Put/seqid=0 2024-12-16T17:57:14,407 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41817 is added to blk_1073742019_1195 (size=12151) 2024-12-16T17:57:14,409 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=53.67 KB at sequenceid=201 (bloomFilter=true), to=hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/7212a7dec92fa5781081695b56d809ad/.tmp/C/e65e8ebdfc614858b13553354a4f07b7 2024-12-16T17:57:14,420 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing 
hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/7212a7dec92fa5781081695b56d809ad/.tmp/A/ba02e84dc04245adb2c3df3c4b69e98a as hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/7212a7dec92fa5781081695b56d809ad/A/ba02e84dc04245adb2c3df3c4b69e98a 2024-12-16T17:57:14,426 INFO [master/3609ad07831c:0.Chore.1 {}] migrate.RollingUpgradeChore(116): There is no table to migrate StoreFileTracker! 2024-12-16T17:57:14,426 INFO [master/3609ad07831c:0.Chore.1 {}] migrate.RollingUpgradeChore(85): All Rolling-Upgrade tasks are complete, shutdown RollingUpgradeChore! 2024-12-16T17:57:14,428 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/7212a7dec92fa5781081695b56d809ad/A/ba02e84dc04245adb2c3df3c4b69e98a, entries=150, sequenceid=201, filesize=30.4 K 2024-12-16T17:57:14,429 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/7212a7dec92fa5781081695b56d809ad/.tmp/B/2892322bc4f64337b29f7d0743eb2980 as hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/7212a7dec92fa5781081695b56d809ad/B/2892322bc4f64337b29f7d0743eb2980 2024-12-16T17:57:14,430 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39733 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-16T17:57:14,431 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39733 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-16T17:57:14,433 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39733 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-16T17:57:14,435 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/7212a7dec92fa5781081695b56d809ad/B/2892322bc4f64337b29f7d0743eb2980, entries=150, sequenceid=201, filesize=11.9 K 2024-12-16T17:57:14,435 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/7212a7dec92fa5781081695b56d809ad/.tmp/C/e65e8ebdfc614858b13553354a4f07b7 as hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/7212a7dec92fa5781081695b56d809ad/C/e65e8ebdfc614858b13553354a4f07b7 2024-12-16T17:57:14,437 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39733 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-16T17:57:14,437 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39733 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-16T17:57:14,438 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39733 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-16T17:57:14,439 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39733 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-16T17:57:14,439 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39733 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-16T17:57:14,440 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39733 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-16T17:57:14,440 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39733 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-16T17:57:14,441 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39733 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-16T17:57:14,442 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39733 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-16T17:57:14,442 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39733 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-16T17:57:14,443 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39733 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-16T17:57:14,444 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39733 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-16T17:57:14,444 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39733 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-16T17:57:14,445 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39733 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-16T17:57:14,445 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39733 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-16T17:57:14,446 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39733 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-16T17:57:14,446 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/7212a7dec92fa5781081695b56d809ad/C/e65e8ebdfc614858b13553354a4f07b7, entries=150, sequenceid=201, filesize=11.9 K 2024-12-16T17:57:14,446 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39733 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-16T17:57:14,447 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39733 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-16T17:57:14,447 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~161.02 KB/164880, heapSize ~422.58 KB/432720, currentSize=40.25 KB/41220 for 7212a7dec92fa5781081695b56d809ad in 154ms, sequenceid=201, compaction requested=true 2024-12-16T17:57:14,447 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 7212a7dec92fa5781081695b56d809ad: 2024-12-16T17:57:14,447 DEBUG [RS:0;3609ad07831c:39733-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-16T17:57:14,447 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 7212a7dec92fa5781081695b56d809ad:A, priority=-2147483648, current under compaction store size is 1 2024-12-16T17:57:14,447 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-16T17:57:14,447 DEBUG [RS:0;3609ad07831c:39733-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-16T17:57:14,447 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 7212a7dec92fa5781081695b56d809ad:B, priority=-2147483648, current under compaction store size is 2 2024-12-16T17:57:14,447 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-16T17:57:14,447 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39733 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-16T17:57:14,447 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 7212a7dec92fa5781081695b56d809ad:C, priority=-2147483648, current under compaction store size is 3 2024-12-16T17:57:14,447 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-16T17:57:14,447 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39733 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-16T17:57:14,448 DEBUG 
[RS:0;3609ad07831c:39733-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 36795 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-16T17:57:14,449 DEBUG [RS:0;3609ad07831c:39733-longCompactions-0 {}] regionserver.HStore(1540): 7212a7dec92fa5781081695b56d809ad/B is initiating minor compaction (all files) 2024-12-16T17:57:14,449 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39733 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-16T17:57:14,449 INFO [RS:0;3609ad07831c:39733-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 7212a7dec92fa5781081695b56d809ad/B in TestAcidGuarantees,,1734371821088.7212a7dec92fa5781081695b56d809ad. 2024-12-16T17:57:14,449 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39733 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-16T17:57:14,449 INFO [RS:0;3609ad07831c:39733-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/7212a7dec92fa5781081695b56d809ad/B/72e838897fc941f8a2df3ef7e6d11c7a, hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/7212a7dec92fa5781081695b56d809ad/B/fa811f5b58e54cf592047382f86c8500, hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/7212a7dec92fa5781081695b56d809ad/B/2892322bc4f64337b29f7d0743eb2980] into tmpdir=hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/7212a7dec92fa5781081695b56d809ad/.tmp, totalSize=35.9 K 2024-12-16T17:57:14,449 DEBUG [RS:0;3609ad07831c:39733-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 93657 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-16T17:57:14,449 DEBUG [RS:0;3609ad07831c:39733-shortCompactions-0 {}] regionserver.HStore(1540): 7212a7dec92fa5781081695b56d809ad/A is initiating minor compaction (all files) 2024-12-16T17:57:14,449 INFO [RS:0;3609ad07831c:39733-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 7212a7dec92fa5781081695b56d809ad/A in TestAcidGuarantees,,1734371821088.7212a7dec92fa5781081695b56d809ad. 
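
The ExploringCompactionPolicy entries above report selecting all three eligible B-family files (36795 bytes in total) and all three A-family files for minor compaction. As a rough, simplified illustration of the ratio test behind that kind of selection (this is not HBase's actual ExploringCompactionPolicy code, and the 1.2 ratio is an assumed value of hbase.hstore.compaction.ratio), a candidate window is acceptable when no single file greatly exceeds the combined size of the others:

import java.util.List;

// Simplified sketch of a ratio-based selection test, in the spirit of the
// ExploringCompactionPolicy lines above; NOT the actual HBase implementation.
public class CompactionRatioSketch {

  // A window is acceptable when no single file is larger than
  // ratio times the combined size of the other files in the window.
  static boolean selectable(List<Long> fileSizes, double ratio) {
    long total = fileSizes.stream().mapToLong(Long::longValue).sum();
    for (long size : fileSizes) {
      if (size > ratio * (total - size)) {
        return false; // one file dominates the window
      }
    }
    return true;
  }

  public static void main(String[] args) {
    // The three B-family files from the log sum to 36795 bytes
    // (~12.2 K + 11.9 K + 11.9 K).
    List<Long> bFiles = List.of(12_493L, 12_151L, 12_151L);
    double ratio = 1.2; // assumed hbase.hstore.compaction.ratio
    System.out.println("selectable: " + selectable(bFiles, ratio));
  }
}

With three files of nearly equal size every file passes the test, which is consistent with the policy compacting all of them.
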
2024-12-16T17:57:14,449 DEBUG [RS:0;3609ad07831c:39733-longCompactions-0 {}] compactions.Compactor(224): Compacting 72e838897fc941f8a2df3ef7e6d11c7a, keycount=150, bloomtype=ROW, size=12.2 K, encoding=NONE, compression=NONE, seqNum=160, earliestPutTs=1734371829883 2024-12-16T17:57:14,449 INFO [RS:0;3609ad07831c:39733-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/7212a7dec92fa5781081695b56d809ad/A/cf4111b4a3cf4b11a022e238d987d481, hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/7212a7dec92fa5781081695b56d809ad/A/ebcf8b6d96054995b0634e7192da808e, hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/7212a7dec92fa5781081695b56d809ad/A/ba02e84dc04245adb2c3df3c4b69e98a] into tmpdir=hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/7212a7dec92fa5781081695b56d809ad/.tmp, totalSize=91.5 K 2024-12-16T17:57:14,449 INFO [RS:0;3609ad07831c:39733-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(181): MOB compaction: major=false isAll=true priority=13 throughput controller=DefaultCompactionThroughputController [maxThroughput=50.00 MB/second, activeCompactions=0] table=TestAcidGuarantees cf=A region=TestAcidGuarantees,,1734371821088.7212a7dec92fa5781081695b56d809ad. 2024-12-16T17:57:14,449 DEBUG [RS:0;3609ad07831c:39733-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(191): MOB compaction table=TestAcidGuarantees cf=A region=TestAcidGuarantees,,1734371821088.7212a7dec92fa5781081695b56d809ad. files: [hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/7212a7dec92fa5781081695b56d809ad/A/cf4111b4a3cf4b11a022e238d987d481, hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/7212a7dec92fa5781081695b56d809ad/A/ebcf8b6d96054995b0634e7192da808e, hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/7212a7dec92fa5781081695b56d809ad/A/ba02e84dc04245adb2c3df3c4b69e98a] 2024-12-16T17:57:14,450 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39733 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-16T17:57:14,450 DEBUG [RS:0;3609ad07831c:39733-longCompactions-0 {}] compactions.Compactor(224): Compacting fa811f5b58e54cf592047382f86c8500, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=171, earliestPutTs=1734371832012 2024-12-16T17:57:14,450 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39733 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-16T17:57:14,450 DEBUG [RS:0;3609ad07831c:39733-shortCompactions-0 {}] compactions.Compactor(224): Compacting cf4111b4a3cf4b11a022e238d987d481, keycount=150, bloomtype=ROW, size=30.7 K, encoding=NONE, compression=NONE, seqNum=160, earliestPutTs=1734371829883 2024-12-16T17:57:14,451 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39733 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-16T17:57:14,451 DEBUG [RS:0;3609ad07831c:39733-longCompactions-0 {}] compactions.Compactor(224): Compacting 2892322bc4f64337b29f7d0743eb2980, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=201, earliestPutTs=1734371833176 2024-12-16T17:57:14,451 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39733 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-16T17:57:14,452 DEBUG [RS:0;3609ad07831c:39733-shortCompactions-0 {}] compactions.Compactor(224): Compacting ebcf8b6d96054995b0634e7192da808e, keycount=150, bloomtype=ROW, size=30.4 K, encoding=NONE, compression=NONE, seqNum=171, earliestPutTs=1734371832012 2024-12-16T17:57:14,452 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39733 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-16T17:57:14,452 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39733 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-16T17:57:14,452 DEBUG [RS:0;3609ad07831c:39733-shortCompactions-0 {}] compactions.Compactor(224): Compacting ba02e84dc04245adb2c3df3c4b69e98a, keycount=150, bloomtype=ROW, size=30.4 K, encoding=NONE, compression=NONE, seqNum=201, earliestPutTs=1734371833176 2024-12-16T17:57:14,453 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39733 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-16T17:57:14,453 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39733 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-16T17:57:14,453 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39733 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-16T17:57:14,454 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39733 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-16T17:57:14,454 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39733 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-16T17:57:14,455 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39733 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-16T17:57:14,456 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39733 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-16T17:57:14,457 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39733 {}] storefiletracker.StoreFileTrackerFactory(122): 
instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-16T17:57:14,457 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39733 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-16T17:57:14,458 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39733 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-16T17:57:14,458 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39733 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-16T17:57:14,458 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39733 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-16T17:57:14,459 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39733 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-16T17:57:14,459 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39733 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-16T17:57:14,461 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39733 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-16T17:57:14,464 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39733 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-16T17:57:14,464 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39733 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-16T17:57:14,465 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39733 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-16T17:57:14,465 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39733 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-16T17:57:14,466 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39733 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-16T17:57:14,466 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39733 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-16T17:57:14,466 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39733 {}] storefiletracker.StoreFileTrackerFactory(122): 
instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-16T17:57:14,467 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39733 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-16T17:57:14,467 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39733 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-16T17:57:14,468 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39733 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-16T17:57:14,468 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39733 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-16T17:57:14,468 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39733 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-16T17:57:14,469 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39733 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-16T17:57:14,469 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39733 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-16T17:57:14,470 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39733 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-16T17:57:14,470 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39733 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-16T17:57:14,471 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39733 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-16T17:57:14,471 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39733 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-16T17:57:14,471 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39733 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-16T17:57:14,472 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39733 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-16T17:57:14,472 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39733 {}] storefiletracker.StoreFileTrackerFactory(122): 
instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-16T17:57:14,473 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39733 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-16T17:57:14,473 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39733 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-16T17:57:14,474 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39733 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-16T17:57:14,474 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39733 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-16T17:57:14,475 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39733 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-16T17:57:14,475 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39733 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-16T17:57:14,476 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39733 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-16T17:57:14,476 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39733 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-16T17:57:14,476 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39733 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-16T17:57:14,476 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39733 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-16T17:57:14,476 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39733 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-16T17:57:14,477 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39733 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-16T17:57:14,477 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39733 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-16T17:57:14,477 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39733 {}] storefiletracker.StoreFileTrackerFactory(122): 
instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-16T17:57:14,477 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39733 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-16T17:57:14,477 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39733 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-16T17:57:14,478 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39733 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-16T17:57:14,478 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39733 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-16T17:57:14,478 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39733 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-16T17:57:14,478 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39733 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-16T17:57:14,478 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39733 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-16T17:57:14,479 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39733 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-16T17:57:14,479 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39733 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-16T17:57:14,479 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39733 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-16T17:57:14,479 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39733 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-16T17:57:14,480 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39733 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-16T17:57:14,480 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39733 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-16T17:57:14,480 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39733 {}] storefiletracker.StoreFileTrackerFactory(122): 
instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-16T17:57:14,480 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39733 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-16T17:57:14,481 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39733 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-16T17:57:14,481 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39733 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-16T17:57:14,481 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39733 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-16T17:57:14,481 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39733 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-16T17:57:14,481 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39733 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-16T17:57:14,481 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39733 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-16T17:57:14,482 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39733 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-16T17:57:14,482 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39733 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-16T17:57:14,482 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39733 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-16T17:57:14,482 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39733 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-16T17:57:14,483 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39733 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-16T17:57:14,483 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39733 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-16T17:57:14,483 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39733 {}] storefiletracker.StoreFileTrackerFactory(122): 
instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-16T17:57:14,483 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39733 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-16T17:57:14,484 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39733 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-16T17:57:14,484 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39733 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-16T17:57:14,484 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39733 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-16T17:57:14,484 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39733 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-16T17:57:14,485 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39733 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-16T17:57:14,485 INFO [RS:0;3609ad07831c:39733-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(322): Compact MOB=false optimized configured=false optimized enabled=false maximum MOB file size=1073741824 major=true store=[table=TestAcidGuarantees family=A region=7212a7dec92fa5781081695b56d809ad] 2024-12-16T17:57:14,485 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39733 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-16T17:57:14,486 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39733 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-16T17:57:14,486 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39733 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-16T17:57:14,487 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39733 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-16T17:57:14,487 INFO [RS:0;3609ad07831c:39733-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 7212a7dec92fa5781081695b56d809ad#B#compaction#165 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 
1 active operations remaining, total limit is 50.00 MB/second 2024-12-16T17:57:14,487 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39733 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-16T17:57:14,488 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39733 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-16T17:57:14,488 DEBUG [RS:0;3609ad07831c:39733-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/7212a7dec92fa5781081695b56d809ad/.tmp/B/b4f5a30590d74fc1903956c55d525b69 is 50, key is test_row_0/B:col10/1734371833176/Put/seqid=0 2024-12-16T17:57:14,488 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39733 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-16T17:57:14,488 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39733 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-16T17:57:14,489 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39733 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-16T17:57:14,489 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39733 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-16T17:57:14,489 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39733 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-16T17:57:14,490 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39733 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-16T17:57:14,490 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39733 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-16T17:57:14,490 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39733 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-16T17:57:14,490 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39733 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-16T17:57:14,491 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39733 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-16T17:57:14,491 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39733 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-16T17:57:14,491 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39733 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-16T17:57:14,492 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39733 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-16T17:57:14,492 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39733 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-16T17:57:14,492 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39733 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-16T17:57:14,492 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39733 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-16T17:57:14,493 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39733 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-16T17:57:14,493 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39733 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-16T17:57:14,493 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39733 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-16T17:57:14,493 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39733 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-16T17:57:14,494 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39733 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-16T17:57:14,494 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39733 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-16T17:57:14,495 DEBUG [RS:0;3609ad07831c:39733-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(626): New MOB writer created=d41d8cd98f00b204e9800998ecf8427e2024121654e309fa148647148af8fa172541106a_7212a7dec92fa5781081695b56d809ad store=[table=TestAcidGuarantees family=A region=7212a7dec92fa5781081695b56d809ad] 2024-12-16T17:57:14,496 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39733 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-16T17:57:14,497 DEBUG [RS:0;3609ad07831c:39733-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(647): Commit or abort size=0 mobCells=0 major=true file=d41d8cd98f00b204e9800998ecf8427e2024121654e309fa148647148af8fa172541106a_7212a7dec92fa5781081695b56d809ad, store=[table=TestAcidGuarantees family=A region=7212a7dec92fa5781081695b56d809ad] 2024-12-16T17:57:14,497 DEBUG [RS:0;3609ad07831c:39733-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(658): Aborting writer for hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e2024121654e309fa148647148af8fa172541106a_7212a7dec92fa5781081695b56d809ad because there are no MOB cells, store=[table=TestAcidGuarantees family=A region=7212a7dec92fa5781081695b56d809ad] 2024-12-16T17:57:14,501 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39733 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-16T17:57:14,503 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41817 is added to blk_1073742020_1196 (size=12595) 2024-12-16T17:57:14,505 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39733 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-16T17:57:14,505 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39733 {}] regionserver.HRegion(8581): Flush requested on 7212a7dec92fa5781081695b56d809ad 2024-12-16T17:57:14,506 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 7212a7dec92fa5781081695b56d809ad 3/3 column families, dataSize=53.67 KB heapSize=141.38 KB 2024-12-16T17:57:14,506 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39733 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-16T17:57:14,506 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 7212a7dec92fa5781081695b56d809ad, store=A 2024-12-16T17:57:14,506 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-16T17:57:14,507 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 7212a7dec92fa5781081695b56d809ad, store=B 2024-12-16T17:57:14,507 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-16T17:57:14,507 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 7212a7dec92fa5781081695b56d809ad, store=C 2024-12-16T17:57:14,507 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-16T17:57:14,508 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39733 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-16T17:57:14,509 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39733 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 
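
The DefaultMobStoreFlusher and DefaultMobStoreCompactor entries appear because the table's A family is MOB-enabled; since the test values stay below the MOB threshold, the compactor creates and then aborts a MOB writer ("there are no MOB cells"). A minimal sketch of declaring such a MOB-enabled family with the HBase 2.x client API is shown below; the table name and the 100 KB threshold are illustrative assumptions, not values taken from this test.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptor;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
import org.apache.hadoop.hbase.util.Bytes;

public class MobFamilyExample {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    try (Connection conn = ConnectionFactory.createConnection(conf);
         Admin admin = conn.getAdmin()) {
      // MOB-enabled family: cells larger than the threshold are written to
      // separate MOB files instead of regular store files.
      ColumnFamilyDescriptor mobFamily = ColumnFamilyDescriptorBuilder
          .newBuilder(Bytes.toBytes("A"))
          .setMobEnabled(true)
          .setMobThreshold(100 * 1024)   // assumed 100 KB threshold
          .build();
      admin.createTable(TableDescriptorBuilder
          .newBuilder(TableName.valueOf("MobExampleTable")) // hypothetical table
          .setColumnFamily(mobFamily)
          .build());
    }
  }
}
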
2024-12-16T17:57:14,512 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39733 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-16T17:57:14,520 DEBUG [RS:0;3609ad07831c:39733-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/7212a7dec92fa5781081695b56d809ad/.tmp/B/b4f5a30590d74fc1903956c55d525b69 as hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/7212a7dec92fa5781081695b56d809ad/B/b4f5a30590d74fc1903956c55d525b69 2024-12-16T17:57:14,530 INFO [RS:0;3609ad07831c:39733-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 7212a7dec92fa5781081695b56d809ad/B of 7212a7dec92fa5781081695b56d809ad into b4f5a30590d74fc1903956c55d525b69(size=12.3 K), total size for store is 12.3 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-12-16T17:57:14,530 DEBUG [RS:0;3609ad07831c:39733-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 7212a7dec92fa5781081695b56d809ad: 2024-12-16T17:57:14,530 INFO [RS:0;3609ad07831c:39733-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1734371821088.7212a7dec92fa5781081695b56d809ad., storeName=7212a7dec92fa5781081695b56d809ad/B, priority=13, startTime=1734371834447; duration=0sec 2024-12-16T17:57:14,531 DEBUG [RS:0;3609ad07831c:39733-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-16T17:57:14,531 DEBUG [RS:0;3609ad07831c:39733-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 7212a7dec92fa5781081695b56d809ad:B 2024-12-16T17:57:14,531 DEBUG [RS:0;3609ad07831c:39733-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-16T17:57:14,532 DEBUG [RS:0;3609ad07831c:39733-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 36795 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-16T17:57:14,532 DEBUG [RS:0;3609ad07831c:39733-longCompactions-0 {}] regionserver.HStore(1540): 7212a7dec92fa5781081695b56d809ad/C is initiating minor compaction (all files) 2024-12-16T17:57:14,533 INFO [RS:0;3609ad07831c:39733-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 7212a7dec92fa5781081695b56d809ad/C in TestAcidGuarantees,,1734371821088.7212a7dec92fa5781081695b56d809ad. 
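
The entries above show the queued B-family compaction completing: the three store files are rewritten into b4f5a30590d74fc1903956c55d525b69 and the under-compaction mark for 7212a7dec92fa5781081695b56d809ad:B is cleared. Compactions here are triggered internally by MemStoreFlusher.0, but the same work can be requested and observed through the 2.x Admin API; a minimal sketch, assuming a reachable cluster and the table/family names from the log, follows:

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.CompactionState;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.util.Bytes;

public class CompactionTriggerExample {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    TableName table = TableName.valueOf("TestAcidGuarantees");
    try (Connection conn = ConnectionFactory.createConnection(conf);
         Admin admin = conn.getAdmin()) {
      // Ask the region servers to compact the B family; the request is
      // queued and executed asynchronously, as in the CompactSplit log lines.
      admin.compact(table, Bytes.toBytes("B"));

      // Poll until the server reports no compaction in progress.
      while (admin.getCompactionState(table) != CompactionState.NONE) {
        Thread.sleep(500);
      }
      System.out.println("compaction finished for " + table);
    }
  }
}
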
2024-12-16T17:57:14,533 INFO [RS:0;3609ad07831c:39733-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/7212a7dec92fa5781081695b56d809ad/C/3c82b4c6bd0d4095b3214eda58dfbf88, hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/7212a7dec92fa5781081695b56d809ad/C/1cba54bfa6a7486d894a85c13f4598f3, hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/7212a7dec92fa5781081695b56d809ad/C/e65e8ebdfc614858b13553354a4f07b7] into tmpdir=hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/7212a7dec92fa5781081695b56d809ad/.tmp, totalSize=35.9 K 2024-12-16T17:57:14,533 DEBUG [RS:0;3609ad07831c:39733-longCompactions-0 {}] compactions.Compactor(224): Compacting 3c82b4c6bd0d4095b3214eda58dfbf88, keycount=150, bloomtype=ROW, size=12.2 K, encoding=NONE, compression=NONE, seqNum=160, earliestPutTs=1734371829883 2024-12-16T17:57:14,534 DEBUG [RS:0;3609ad07831c:39733-longCompactions-0 {}] compactions.Compactor(224): Compacting 1cba54bfa6a7486d894a85c13f4598f3, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=171, earliestPutTs=1734371832012 2024-12-16T17:57:14,534 DEBUG [RS:0;3609ad07831c:39733-longCompactions-0 {}] compactions.Compactor(224): Compacting e65e8ebdfc614858b13553354a4f07b7, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=201, earliestPutTs=1734371833176 2024-12-16T17:57:14,542 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41817 is added to blk_1073742021_1197 (size=4469) 2024-12-16T17:57:14,543 INFO [RS:0;3609ad07831c:39733-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 7212a7dec92fa5781081695b56d809ad#A#compaction#166 average throughput is 0.42 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-12-16T17:57:14,544 DEBUG [RS:0;3609ad07831c:39733-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/7212a7dec92fa5781081695b56d809ad/.tmp/A/93958fdf6d434341a6b645a85f56509f is 175, key is test_row_0/A:col10/1734371833176/Put/seqid=0 2024-12-16T17:57:14,552 INFO [RS:0;3609ad07831c:39733-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 7212a7dec92fa5781081695b56d809ad#C#compaction#167 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-12-16T17:57:14,553 DEBUG [RS:0;3609ad07831c:39733-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/7212a7dec92fa5781081695b56d809ad/.tmp/C/00c74d89c4934f4080f19e1b4e9aaa43 is 50, key is test_row_0/C:col10/1734371833176/Put/seqid=0 2024-12-16T17:57:14,554 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241216c6fa9c2d3c4142729849f3ba72a420d6_7212a7dec92fa5781081695b56d809ad is 50, key is test_row_0/A:col10/1734371834334/Put/seqid=0 2024-12-16T17:57:14,572 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39733 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7212a7dec92fa5781081695b56d809ad, server=3609ad07831c,39733,1734371789085 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-16T17:57:14,572 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39733 {}] ipc.CallRunner(138): callId: 104 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41652 deadline: 1734371894562, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7212a7dec92fa5781081695b56d809ad, server=3609ad07831c,39733,1734371789085 2024-12-16T17:57:14,572 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39733 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7212a7dec92fa5781081695b56d809ad, server=3609ad07831c,39733,1734371789085 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-16T17:57:14,572 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39733 {}] ipc.CallRunner(138): callId: 112 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41612 deadline: 1734371894564, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7212a7dec92fa5781081695b56d809ad, server=3609ad07831c,39733,1734371789085 2024-12-16T17:57:14,572 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39733 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7212a7dec92fa5781081695b56d809ad, server=3609ad07831c,39733,1734371789085 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-16T17:57:14,573 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39733 {}] ipc.CallRunner(138): callId: 112 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41628 deadline: 1734371894565, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7212a7dec92fa5781081695b56d809ad, server=3609ad07831c,39733,1734371789085 2024-12-16T17:57:14,573 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39733 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7212a7dec92fa5781081695b56d809ad, server=3609ad07831c,39733,1734371789085 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-16T17:57:14,573 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39733 {}] ipc.CallRunner(138): callId: 110 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41586 deadline: 1734371894565, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7212a7dec92fa5781081695b56d809ad, server=3609ad07831c,39733,1734371789085 2024-12-16T17:57:14,573 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39733 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7212a7dec92fa5781081695b56d809ad, server=3609ad07831c,39733,1734371789085 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-16T17:57:14,573 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39733 {}] ipc.CallRunner(138): callId: 110 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41642 deadline: 1734371894567, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7212a7dec92fa5781081695b56d809ad, server=3609ad07831c,39733,1734371789085 2024-12-16T17:57:14,582 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41817 is added to blk_1073742022_1198 (size=31549) 2024-12-16T17:57:14,589 DEBUG [RS:0;3609ad07831c:39733-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/7212a7dec92fa5781081695b56d809ad/.tmp/A/93958fdf6d434341a6b645a85f56509f as hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/7212a7dec92fa5781081695b56d809ad/A/93958fdf6d434341a6b645a85f56509f 2024-12-16T17:57:14,593 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41817 is added to blk_1073742023_1199 (size=12595) 2024-12-16T17:57:14,600 INFO [RS:0;3609ad07831c:39733-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 7212a7dec92fa5781081695b56d809ad/A of 7212a7dec92fa5781081695b56d809ad into 93958fdf6d434341a6b645a85f56509f(size=30.8 K), total size for store is 30.8 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-12-16T17:57:14,601 DEBUG [RS:0;3609ad07831c:39733-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 7212a7dec92fa5781081695b56d809ad: 2024-12-16T17:57:14,601 INFO [RS:0;3609ad07831c:39733-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1734371821088.7212a7dec92fa5781081695b56d809ad., storeName=7212a7dec92fa5781081695b56d809ad/A, priority=13, startTime=1734371834447; duration=0sec 2024-12-16T17:57:14,601 DEBUG [RS:0;3609ad07831c:39733-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-16T17:57:14,601 DEBUG [RS:0;3609ad07831c:39733-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 7212a7dec92fa5781081695b56d809ad:A 2024-12-16T17:57:14,609 DEBUG [RS:0;3609ad07831c:39733-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/7212a7dec92fa5781081695b56d809ad/.tmp/C/00c74d89c4934f4080f19e1b4e9aaa43 as hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/7212a7dec92fa5781081695b56d809ad/C/00c74d89c4934f4080f19e1b4e9aaa43 2024-12-16T17:57:14,614 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41817 is added to blk_1073742024_1200 (size=17284) 2024-12-16T17:57:14,621 INFO [RS:0;3609ad07831c:39733-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 7212a7dec92fa5781081695b56d809ad/C of 7212a7dec92fa5781081695b56d809ad into 00c74d89c4934f4080f19e1b4e9aaa43(size=12.3 K), total size for store is 12.3 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-12-16T17:57:14,621 DEBUG [RS:0;3609ad07831c:39733-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 7212a7dec92fa5781081695b56d809ad: 2024-12-16T17:57:14,621 INFO [RS:0;3609ad07831c:39733-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1734371821088.7212a7dec92fa5781081695b56d809ad., storeName=7212a7dec92fa5781081695b56d809ad/C, priority=13, startTime=1734371834447; duration=0sec 2024-12-16T17:57:14,621 DEBUG [RS:0;3609ad07831c:39733-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-16T17:57:14,622 DEBUG [RS:0;3609ad07831c:39733-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 7212a7dec92fa5781081695b56d809ad:C 2024-12-16T17:57:14,674 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39733 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7212a7dec92fa5781081695b56d809ad, server=3609ad07831c,39733,1734371789085 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-16T17:57:14,674 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39733 {}] ipc.CallRunner(138): callId: 106 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41652 deadline: 1734371894673, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7212a7dec92fa5781081695b56d809ad, server=3609ad07831c,39733,1734371789085 2024-12-16T17:57:14,674 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39733 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7212a7dec92fa5781081695b56d809ad, server=3609ad07831c,39733,1734371789085 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-16T17:57:14,675 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39733 {}] ipc.CallRunner(138): callId: 114 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41612 deadline: 1734371894673, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7212a7dec92fa5781081695b56d809ad, server=3609ad07831c,39733,1734371789085 2024-12-16T17:57:14,675 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39733 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7212a7dec92fa5781081695b56d809ad, server=3609ad07831c,39733,1734371789085 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-16T17:57:14,675 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39733 {}] ipc.CallRunner(138): callId: 114 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41628 deadline: 1734371894673, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7212a7dec92fa5781081695b56d809ad, server=3609ad07831c,39733,1734371789085 2024-12-16T17:57:14,676 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39733 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7212a7dec92fa5781081695b56d809ad, server=3609ad07831c,39733,1734371789085 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-16T17:57:14,676 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39733 {}] ipc.CallRunner(138): callId: 112 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41586 deadline: 1734371894675, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7212a7dec92fa5781081695b56d809ad, server=3609ad07831c,39733,1734371789085 2024-12-16T17:57:14,676 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39733 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7212a7dec92fa5781081695b56d809ad, server=3609ad07831c,39733,1734371789085 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-16T17:57:14,676 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39733 {}] ipc.CallRunner(138): callId: 112 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41642 deadline: 1734371894675, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7212a7dec92fa5781081695b56d809ad, server=3609ad07831c,39733,1734371789085 2024-12-16T17:57:14,720 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38367 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=45 2024-12-16T17:57:14,721 INFO [Thread-705 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 45 completed 2024-12-16T17:57:14,722 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38367 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-12-16T17:57:14,723 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38367 {}] procedure2.ProcedureExecutor(1098): Stored pid=47, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=47, table=TestAcidGuarantees 2024-12-16T17:57:14,723 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38367 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=47 2024-12-16T17:57:14,723 INFO [PEWorker-4 {}] procedure.FlushTableProcedure(91): pid=47, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=47, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-12-16T17:57:14,724 INFO [PEWorker-4 {}] procedure.FlushTableProcedure(91): pid=47, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=47, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-12-16T17:57:14,724 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=48, ppid=47, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-12-16T17:57:14,824 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38367 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=47 2024-12-16T17:57:14,875 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39733 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7212a7dec92fa5781081695b56d809ad, server=3609ad07831c,39733,1734371789085 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-16T17:57:14,876 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39733 {}] ipc.CallRunner(138): callId: 108 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41652 deadline: 1734371894875, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7212a7dec92fa5781081695b56d809ad, server=3609ad07831c,39733,1734371789085 2024-12-16T17:57:14,876 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 3609ad07831c,39733,1734371789085 2024-12-16T17:57:14,876 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=39733 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=48 2024-12-16T17:57:14,876 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39733 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7212a7dec92fa5781081695b56d809ad, server=3609ad07831c,39733,1734371789085 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-16T17:57:14,876 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-1 {event_type=RS_FLUSH_REGIONS, pid=48}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1734371821088.7212a7dec92fa5781081695b56d809ad. 
2024-12-16T17:57:14,876 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39733 {}] ipc.CallRunner(138): callId: 116 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41612 deadline: 1734371894875, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7212a7dec92fa5781081695b56d809ad, server=3609ad07831c,39733,1734371789085 2024-12-16T17:57:14,877 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-1 {event_type=RS_FLUSH_REGIONS, pid=48}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1734371821088.7212a7dec92fa5781081695b56d809ad. as already flushing 2024-12-16T17:57:14,877 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-1 {event_type=RS_FLUSH_REGIONS, pid=48}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1734371821088.7212a7dec92fa5781081695b56d809ad. 2024-12-16T17:57:14,877 ERROR [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-1 {event_type=RS_FLUSH_REGIONS, pid=48}] handler.RSProcedureHandler(58): pid=48 java.io.IOException: Unable to complete flush {ENCODED => 7212a7dec92fa5781081695b56d809ad, NAME => 'TestAcidGuarantees,,1734371821088.7212a7dec92fa5781081695b56d809ad.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-16T17:57:14,877 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-1 {event_type=RS_FLUSH_REGIONS, pid=48}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=48 java.io.IOException: Unable to complete flush {ENCODED => 7212a7dec92fa5781081695b56d809ad, NAME => 'TestAcidGuarantees,,1734371821088.7212a7dec92fa5781081695b56d809ad.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-16T17:57:14,878 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38367 {}] master.HMaster(4114): Remote procedure failed, pid=48 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 7212a7dec92fa5781081695b56d809ad, NAME => 'TestAcidGuarantees,,1734371821088.7212a7dec92fa5781081695b56d809ad.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 7212a7dec92fa5781081695b56d809ad, NAME => 'TestAcidGuarantees,,1734371821088.7212a7dec92fa5781081695b56d809ad.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-16T17:57:14,878 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39733 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7212a7dec92fa5781081695b56d809ad, server=3609ad07831c,39733,1734371789085 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-16T17:57:14,878 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39733 {}] ipc.CallRunner(138): callId: 114 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41586 deadline: 1734371894877, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7212a7dec92fa5781081695b56d809ad, server=3609ad07831c,39733,1734371789085 2024-12-16T17:57:14,878 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39733 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7212a7dec92fa5781081695b56d809ad, server=3609ad07831c,39733,1734371789085 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-16T17:57:14,878 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39733 {}] ipc.CallRunner(138): callId: 114 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41642 deadline: 1734371894877, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7212a7dec92fa5781081695b56d809ad, server=3609ad07831c,39733,1734371789085 2024-12-16T17:57:14,880 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39733 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7212a7dec92fa5781081695b56d809ad, server=3609ad07831c,39733,1734371789085 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-16T17:57:14,880 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39733 {}] ipc.CallRunner(138): callId: 116 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41628 deadline: 1734371894878, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7212a7dec92fa5781081695b56d809ad, server=3609ad07831c,39733,1734371789085 2024-12-16T17:57:15,015 DEBUG [MemStoreFlusher.0 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-16T17:57:15,021 INFO [MemStoreFlusher.0 {}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241216c6fa9c2d3c4142729849f3ba72a420d6_7212a7dec92fa5781081695b56d809ad to hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241216c6fa9c2d3c4142729849f3ba72a420d6_7212a7dec92fa5781081695b56d809ad 2024-12-16T17:57:15,024 DEBUG [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/7212a7dec92fa5781081695b56d809ad/.tmp/A/bd5a3a1af6294e16b24e0c88ad4711b8, store: [table=TestAcidGuarantees family=A region=7212a7dec92fa5781081695b56d809ad] 2024-12-16T17:57:15,024 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/7212a7dec92fa5781081695b56d809ad/.tmp/A/bd5a3a1af6294e16b24e0c88ad4711b8 is 175, key is test_row_0/A:col10/1734371834334/Put/seqid=0 2024-12-16T17:57:15,027 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38367 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=47 2024-12-16T17:57:15,030 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 3609ad07831c,39733,1734371789085 2024-12-16T17:57:15,030 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=39733 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=48 2024-12-16T17:57:15,030 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-2 {event_type=RS_FLUSH_REGIONS, 
pid=48}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1734371821088.7212a7dec92fa5781081695b56d809ad. 2024-12-16T17:57:15,031 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-2 {event_type=RS_FLUSH_REGIONS, pid=48}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1734371821088.7212a7dec92fa5781081695b56d809ad. as already flushing 2024-12-16T17:57:15,031 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-2 {event_type=RS_FLUSH_REGIONS, pid=48}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1734371821088.7212a7dec92fa5781081695b56d809ad. 2024-12-16T17:57:15,031 ERROR [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-2 {event_type=RS_FLUSH_REGIONS, pid=48}] handler.RSProcedureHandler(58): pid=48 java.io.IOException: Unable to complete flush {ENCODED => 7212a7dec92fa5781081695b56d809ad, NAME => 'TestAcidGuarantees,,1734371821088.7212a7dec92fa5781081695b56d809ad.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-16T17:57:15,031 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-2 {event_type=RS_FLUSH_REGIONS, pid=48}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=48 java.io.IOException: Unable to complete flush {ENCODED => 7212a7dec92fa5781081695b56d809ad, NAME => 'TestAcidGuarantees,,1734371821088.7212a7dec92fa5781081695b56d809ad.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-16T17:57:15,032 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38367 {}] master.HMaster(4114): Remote procedure failed, pid=48 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 7212a7dec92fa5781081695b56d809ad, NAME => 'TestAcidGuarantees,,1734371821088.7212a7dec92fa5781081695b56d809ad.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 7212a7dec92fa5781081695b56d809ad, NAME => 'TestAcidGuarantees,,1734371821088.7212a7dec92fa5781081695b56d809ad.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-16T17:57:15,046 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41817 is added to blk_1073742025_1201 (size=48389) 2024-12-16T17:57:15,048 INFO [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=213, memsize=20.1 K, hasBloomFilter=true, into tmp file hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/7212a7dec92fa5781081695b56d809ad/.tmp/A/bd5a3a1af6294e16b24e0c88ad4711b8 2024-12-16T17:57:15,066 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/7212a7dec92fa5781081695b56d809ad/.tmp/B/e90482b51c3849688b30b606560170bf is 50, key is test_row_0/B:col10/1734371834334/Put/seqid=0 2024-12-16T17:57:15,082 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41817 is added to blk_1073742026_1202 (size=12151) 2024-12-16T17:57:15,179 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39733 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7212a7dec92fa5781081695b56d809ad, server=3609ad07831c,39733,1734371789085 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-16T17:57:15,180 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39733 {}] ipc.CallRunner(138): callId: 118 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41612 deadline: 1734371895178, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7212a7dec92fa5781081695b56d809ad, server=3609ad07831c,39733,1734371789085 2024-12-16T17:57:15,180 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39733 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7212a7dec92fa5781081695b56d809ad, server=3609ad07831c,39733,1734371789085 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-16T17:57:15,180 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39733 {}] ipc.CallRunner(138): callId: 110 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41652 deadline: 1734371895178, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7212a7dec92fa5781081695b56d809ad, server=3609ad07831c,39733,1734371789085 2024-12-16T17:57:15,180 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39733 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7212a7dec92fa5781081695b56d809ad, server=3609ad07831c,39733,1734371789085 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-16T17:57:15,181 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39733 {}] ipc.CallRunner(138): callId: 116 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41642 deadline: 1734371895179, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7212a7dec92fa5781081695b56d809ad, server=3609ad07831c,39733,1734371789085 2024-12-16T17:57:15,181 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39733 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7212a7dec92fa5781081695b56d809ad, server=3609ad07831c,39733,1734371789085 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-16T17:57:15,181 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39733 {}] ipc.CallRunner(138): callId: 116 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41586 deadline: 1734371895180, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7212a7dec92fa5781081695b56d809ad, server=3609ad07831c,39733,1734371789085 2024-12-16T17:57:15,183 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39733 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7212a7dec92fa5781081695b56d809ad, server=3609ad07831c,39733,1734371789085 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-16T17:57:15,183 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39733 {}] ipc.CallRunner(138): callId: 118 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41628 deadline: 1734371895182, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7212a7dec92fa5781081695b56d809ad, server=3609ad07831c,39733,1734371789085 2024-12-16T17:57:15,183 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 3609ad07831c,39733,1734371789085 2024-12-16T17:57:15,184 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=39733 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=48 2024-12-16T17:57:15,184 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-0 {event_type=RS_FLUSH_REGIONS, pid=48}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1734371821088.7212a7dec92fa5781081695b56d809ad. 2024-12-16T17:57:15,184 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-0 {event_type=RS_FLUSH_REGIONS, pid=48}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1734371821088.7212a7dec92fa5781081695b56d809ad. as already flushing 2024-12-16T17:57:15,184 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-0 {event_type=RS_FLUSH_REGIONS, pid=48}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1734371821088.7212a7dec92fa5781081695b56d809ad. 2024-12-16T17:57:15,184 ERROR [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-0 {event_type=RS_FLUSH_REGIONS, pid=48}] handler.RSProcedureHandler(58): pid=48 java.io.IOException: Unable to complete flush {ENCODED => 7212a7dec92fa5781081695b56d809ad, NAME => 'TestAcidGuarantees,,1734371821088.7212a7dec92fa5781081695b56d809ad.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-16T17:57:15,184 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-0 {event_type=RS_FLUSH_REGIONS, pid=48}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=48 java.io.IOException: Unable to complete flush {ENCODED => 7212a7dec92fa5781081695b56d809ad, NAME => 'TestAcidGuarantees,,1734371821088.7212a7dec92fa5781081695b56d809ad.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-16T17:57:15,185 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38367 {}] master.HMaster(4114): Remote procedure failed, pid=48 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 7212a7dec92fa5781081695b56d809ad, NAME => 'TestAcidGuarantees,,1734371821088.7212a7dec92fa5781081695b56d809ad.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 7212a7dec92fa5781081695b56d809ad, NAME => 'TestAcidGuarantees,,1734371821088.7212a7dec92fa5781081695b56d809ad.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-16T17:57:15,328 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38367 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=47 2024-12-16T17:57:15,336 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 3609ad07831c,39733,1734371789085 2024-12-16T17:57:15,336 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=39733 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=48 2024-12-16T17:57:15,336 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-1 {event_type=RS_FLUSH_REGIONS, pid=48}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1734371821088.7212a7dec92fa5781081695b56d809ad. 2024-12-16T17:57:15,337 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-1 {event_type=RS_FLUSH_REGIONS, pid=48}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1734371821088.7212a7dec92fa5781081695b56d809ad. as already flushing 2024-12-16T17:57:15,337 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-1 {event_type=RS_FLUSH_REGIONS, pid=48}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1734371821088.7212a7dec92fa5781081695b56d809ad. 2024-12-16T17:57:15,337 ERROR [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-1 {event_type=RS_FLUSH_REGIONS, pid=48}] handler.RSProcedureHandler(58): pid=48 java.io.IOException: Unable to complete flush {ENCODED => 7212a7dec92fa5781081695b56d809ad, NAME => 'TestAcidGuarantees,,1734371821088.7212a7dec92fa5781081695b56d809ad.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-16T17:57:15,337 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-1 {event_type=RS_FLUSH_REGIONS, pid=48}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=48 java.io.IOException: Unable to complete flush {ENCODED => 7212a7dec92fa5781081695b56d809ad, NAME => 'TestAcidGuarantees,,1734371821088.7212a7dec92fa5781081695b56d809ad.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-16T17:57:15,337 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38367 {}] master.HMaster(4114): Remote procedure failed, pid=48 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 7212a7dec92fa5781081695b56d809ad, NAME => 'TestAcidGuarantees,,1734371821088.7212a7dec92fa5781081695b56d809ad.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 7212a7dec92fa5781081695b56d809ad, NAME => 'TestAcidGuarantees,,1734371821088.7212a7dec92fa5781081695b56d809ad.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-16T17:57:15,486 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=20.13 KB at sequenceid=213 (bloomFilter=true), to=hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/7212a7dec92fa5781081695b56d809ad/.tmp/B/e90482b51c3849688b30b606560170bf 2024-12-16T17:57:15,489 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 3609ad07831c,39733,1734371789085 2024-12-16T17:57:15,490 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=39733 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=48 2024-12-16T17:57:15,490 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-2 {event_type=RS_FLUSH_REGIONS, pid=48}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1734371821088.7212a7dec92fa5781081695b56d809ad. 2024-12-16T17:57:15,490 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-2 {event_type=RS_FLUSH_REGIONS, pid=48}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1734371821088.7212a7dec92fa5781081695b56d809ad. as already flushing 2024-12-16T17:57:15,490 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-2 {event_type=RS_FLUSH_REGIONS, pid=48}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1734371821088.7212a7dec92fa5781081695b56d809ad. 2024-12-16T17:57:15,490 ERROR [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-2 {event_type=RS_FLUSH_REGIONS, pid=48}] handler.RSProcedureHandler(58): pid=48 java.io.IOException: Unable to complete flush {ENCODED => 7212a7dec92fa5781081695b56d809ad, NAME => 'TestAcidGuarantees,,1734371821088.7212a7dec92fa5781081695b56d809ad.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-16T17:57:15,490 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-2 {event_type=RS_FLUSH_REGIONS, pid=48}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=48 java.io.IOException: Unable to complete flush {ENCODED => 7212a7dec92fa5781081695b56d809ad, NAME => 'TestAcidGuarantees,,1734371821088.7212a7dec92fa5781081695b56d809ad.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] 
at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-16T17:57:15,491 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38367 {}] master.HMaster(4114): Remote procedure failed, pid=48 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 7212a7dec92fa5781081695b56d809ad, NAME => 'TestAcidGuarantees,,1734371821088.7212a7dec92fa5781081695b56d809ad.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 7212a7dec92fa5781081695b56d809ad, NAME => 'TestAcidGuarantees,,1734371821088.7212a7dec92fa5781081695b56d809ad.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-16T17:57:15,502 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/7212a7dec92fa5781081695b56d809ad/.tmp/C/d1668af17bc541ce940705dd2818bd0b is 50, key is test_row_0/C:col10/1734371834334/Put/seqid=0 2024-12-16T17:57:15,512 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41817 is added to blk_1073742027_1203 (size=12151) 2024-12-16T17:57:15,513 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=20.13 KB at sequenceid=213 (bloomFilter=true), to=hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/7212a7dec92fa5781081695b56d809ad/.tmp/C/d1668af17bc541ce940705dd2818bd0b 2024-12-16T17:57:15,520 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/7212a7dec92fa5781081695b56d809ad/.tmp/A/bd5a3a1af6294e16b24e0c88ad4711b8 as hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/7212a7dec92fa5781081695b56d809ad/A/bd5a3a1af6294e16b24e0c88ad4711b8 2024-12-16T17:57:15,527 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/7212a7dec92fa5781081695b56d809ad/A/bd5a3a1af6294e16b24e0c88ad4711b8, entries=250, sequenceid=213, filesize=47.3 K 2024-12-16T17:57:15,529 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/7212a7dec92fa5781081695b56d809ad/.tmp/B/e90482b51c3849688b30b606560170bf as hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/7212a7dec92fa5781081695b56d809ad/B/e90482b51c3849688b30b606560170bf 2024-12-16T17:57:15,536 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/7212a7dec92fa5781081695b56d809ad/B/e90482b51c3849688b30b606560170bf, entries=150, sequenceid=213, filesize=11.9 K 2024-12-16T17:57:15,537 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/7212a7dec92fa5781081695b56d809ad/.tmp/C/d1668af17bc541ce940705dd2818bd0b as hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/7212a7dec92fa5781081695b56d809ad/C/d1668af17bc541ce940705dd2818bd0b 2024-12-16T17:57:15,542 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/7212a7dec92fa5781081695b56d809ad/C/d1668af17bc541ce940705dd2818bd0b, entries=150, sequenceid=213, filesize=11.9 K 2024-12-16T17:57:15,543 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~60.38 KB/61830, heapSize ~158.91 KB/162720, currentSize=140.89 KB/144270 for 7212a7dec92fa5781081695b56d809ad in 1037ms, sequenceid=213, 
compaction requested=false 2024-12-16T17:57:15,544 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 7212a7dec92fa5781081695b56d809ad: 2024-12-16T17:57:15,644 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 3609ad07831c,39733,1734371789085 2024-12-16T17:57:15,644 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=39733 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=48 2024-12-16T17:57:15,644 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-0 {event_type=RS_FLUSH_REGIONS, pid=48}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1734371821088.7212a7dec92fa5781081695b56d809ad. 2024-12-16T17:57:15,645 INFO [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-0 {event_type=RS_FLUSH_REGIONS, pid=48}] regionserver.HRegion(2837): Flushing 7212a7dec92fa5781081695b56d809ad 3/3 column families, dataSize=140.89 KB heapSize=369.89 KB 2024-12-16T17:57:15,645 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-0 {event_type=RS_FLUSH_REGIONS, pid=48}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 7212a7dec92fa5781081695b56d809ad, store=A 2024-12-16T17:57:15,645 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-0 {event_type=RS_FLUSH_REGIONS, pid=48}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-16T17:57:15,645 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-0 {event_type=RS_FLUSH_REGIONS, pid=48}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 7212a7dec92fa5781081695b56d809ad, store=B 2024-12-16T17:57:15,645 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-0 {event_type=RS_FLUSH_REGIONS, pid=48}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-16T17:57:15,645 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-0 {event_type=RS_FLUSH_REGIONS, pid=48}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 7212a7dec92fa5781081695b56d809ad, store=C 2024-12-16T17:57:15,645 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-0 {event_type=RS_FLUSH_REGIONS, pid=48}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-16T17:57:15,651 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-0 {event_type=RS_FLUSH_REGIONS, pid=48}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241216f12d793ef0d742299bb0ff2cd81c3045_7212a7dec92fa5781081695b56d809ad is 50, key is test_row_0/A:col10/1734371834565/Put/seqid=0 2024-12-16T17:57:15,658 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41817 is added to blk_1073742028_1204 (size=12304) 2024-12-16T17:57:15,658 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-0 {event_type=RS_FLUSH_REGIONS, pid=48}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-16T17:57:15,662 INFO [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-0 {event_type=RS_FLUSH_REGIONS, pid=48}] regionserver.HMobStore(268): FLUSH Renaming flushed file from 
hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241216f12d793ef0d742299bb0ff2cd81c3045_7212a7dec92fa5781081695b56d809ad to hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241216f12d793ef0d742299bb0ff2cd81c3045_7212a7dec92fa5781081695b56d809ad 2024-12-16T17:57:15,683 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39733 {}] regionserver.HRegion(8581): Flush requested on 7212a7dec92fa5781081695b56d809ad 2024-12-16T17:57:15,683 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1734371821088.7212a7dec92fa5781081695b56d809ad. as already flushing 2024-12-16T17:57:15,685 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-0 {event_type=RS_FLUSH_REGIONS, pid=48}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/7212a7dec92fa5781081695b56d809ad/.tmp/A/a69582b913cf4020b2bef655d64fe16f, store: [table=TestAcidGuarantees family=A region=7212a7dec92fa5781081695b56d809ad] 2024-12-16T17:57:15,685 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-0 {event_type=RS_FLUSH_REGIONS, pid=48}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/7212a7dec92fa5781081695b56d809ad/.tmp/A/a69582b913cf4020b2bef655d64fe16f is 175, key is test_row_0/A:col10/1734371834565/Put/seqid=0 2024-12-16T17:57:15,692 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39733 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7212a7dec92fa5781081695b56d809ad, server=3609ad07831c,39733,1734371789085 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-16T17:57:15,692 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39733 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7212a7dec92fa5781081695b56d809ad, server=3609ad07831c,39733,1734371789085 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-16T17:57:15,692 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39733 {}] ipc.CallRunner(138): callId: 113 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41652 deadline: 1734371895688, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7212a7dec92fa5781081695b56d809ad, server=3609ad07831c,39733,1734371789085 2024-12-16T17:57:15,692 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39733 {}] ipc.CallRunner(138): callId: 119 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41642 deadline: 1734371895689, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7212a7dec92fa5781081695b56d809ad, server=3609ad07831c,39733,1734371789085 2024-12-16T17:57:15,694 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39733 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7212a7dec92fa5781081695b56d809ad, server=3609ad07831c,39733,1734371789085 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-16T17:57:15,694 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39733 {}] ipc.CallRunner(138): callId: 123 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41612 deadline: 1734371895691, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7212a7dec92fa5781081695b56d809ad, server=3609ad07831c,39733,1734371789085 2024-12-16T17:57:15,695 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39733 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7212a7dec92fa5781081695b56d809ad, server=3609ad07831c,39733,1734371789085 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-16T17:57:15,696 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39733 {}] ipc.CallRunner(138): callId: 121 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41628 deadline: 1734371895692, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7212a7dec92fa5781081695b56d809ad, server=3609ad07831c,39733,1734371789085 2024-12-16T17:57:15,696 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39733 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7212a7dec92fa5781081695b56d809ad, server=3609ad07831c,39733,1734371789085 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-16T17:57:15,696 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39733 {}] ipc.CallRunner(138): callId: 121 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41586 deadline: 1734371895692, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7212a7dec92fa5781081695b56d809ad, server=3609ad07831c,39733,1734371789085 2024-12-16T17:57:15,706 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41817 is added to blk_1073742029_1205 (size=31105) 2024-12-16T17:57:15,707 INFO [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-0 {event_type=RS_FLUSH_REGIONS, pid=48}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=240, memsize=47.0 K, hasBloomFilter=true, into tmp file hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/7212a7dec92fa5781081695b56d809ad/.tmp/A/a69582b913cf4020b2bef655d64fe16f 2024-12-16T17:57:15,721 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-0 {event_type=RS_FLUSH_REGIONS, pid=48}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/7212a7dec92fa5781081695b56d809ad/.tmp/B/9484aeca67984bdd841c38956c7bbe19 is 50, key is test_row_0/B:col10/1734371834565/Put/seqid=0 2024-12-16T17:57:15,755 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41817 is added to blk_1073742030_1206 (size=12151) 2024-12-16T17:57:15,794 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39733 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7212a7dec92fa5781081695b56d809ad, server=3609ad07831c,39733,1734371789085 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-16T17:57:15,794 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39733 {}] ipc.CallRunner(138): callId: 115 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41652 deadline: 1734371895793, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7212a7dec92fa5781081695b56d809ad, server=3609ad07831c,39733,1734371789085 2024-12-16T17:57:15,794 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39733 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7212a7dec92fa5781081695b56d809ad, server=3609ad07831c,39733,1734371789085 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-16T17:57:15,795 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39733 {}] ipc.CallRunner(138): callId: 121 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41642 deadline: 1734371895793, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7212a7dec92fa5781081695b56d809ad, server=3609ad07831c,39733,1734371789085 2024-12-16T17:57:15,797 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39733 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7212a7dec92fa5781081695b56d809ad, server=3609ad07831c,39733,1734371789085 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-16T17:57:15,797 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39733 {}] ipc.CallRunner(138): callId: 125 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41612 deadline: 1734371895795, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7212a7dec92fa5781081695b56d809ad, server=3609ad07831c,39733,1734371789085 2024-12-16T17:57:15,798 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39733 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7212a7dec92fa5781081695b56d809ad, server=3609ad07831c,39733,1734371789085 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-16T17:57:15,798 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39733 {}] ipc.CallRunner(138): callId: 123 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41628 deadline: 1734371895797, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7212a7dec92fa5781081695b56d809ad, server=3609ad07831c,39733,1734371789085 2024-12-16T17:57:15,798 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39733 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7212a7dec92fa5781081695b56d809ad, server=3609ad07831c,39733,1734371789085 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-16T17:57:15,798 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39733 {}] ipc.CallRunner(138): callId: 123 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41586 deadline: 1734371895797, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7212a7dec92fa5781081695b56d809ad, server=3609ad07831c,39733,1734371789085 2024-12-16T17:57:15,829 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38367 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=47 2024-12-16T17:57:15,996 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39733 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7212a7dec92fa5781081695b56d809ad, server=3609ad07831c,39733,1734371789085 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-16T17:57:15,996 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39733 {}] ipc.CallRunner(138): callId: 117 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41652 deadline: 1734371895995, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7212a7dec92fa5781081695b56d809ad, server=3609ad07831c,39733,1734371789085 2024-12-16T17:57:15,996 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39733 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7212a7dec92fa5781081695b56d809ad, server=3609ad07831c,39733,1734371789085 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-16T17:57:15,997 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39733 {}] ipc.CallRunner(138): callId: 123 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41642 deadline: 1734371895995, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7212a7dec92fa5781081695b56d809ad, server=3609ad07831c,39733,1734371789085 2024-12-16T17:57:15,998 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39733 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7212a7dec92fa5781081695b56d809ad, server=3609ad07831c,39733,1734371789085 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-16T17:57:15,998 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39733 {}] ipc.CallRunner(138): callId: 127 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41612 deadline: 1734371895998, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7212a7dec92fa5781081695b56d809ad, server=3609ad07831c,39733,1734371789085 2024-12-16T17:57:15,999 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39733 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7212a7dec92fa5781081695b56d809ad, server=3609ad07831c,39733,1734371789085 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-16T17:57:15,999 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39733 {}] ipc.CallRunner(138): callId: 125 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41586 deadline: 1734371895999, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7212a7dec92fa5781081695b56d809ad, server=3609ad07831c,39733,1734371789085 2024-12-16T17:57:16,005 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39733 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7212a7dec92fa5781081695b56d809ad, server=3609ad07831c,39733,1734371789085
at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?]
at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?]
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?]
at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?]
at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?]
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT]
at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT]
at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT]
at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT]
2024-12-16T17:57:16,005 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39733 {}] ipc.CallRunner(138): callId: 125 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41628 deadline: 1734371896004, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7212a7dec92fa5781081695b56d809ad, server=3609ad07831c,39733,1734371789085
2024-12-16T17:57:16,156 INFO [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-0 {event_type=RS_FLUSH_REGIONS, pid=48}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=46.96 KB at sequenceid=240 (bloomFilter=true), to=hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/7212a7dec92fa5781081695b56d809ad/.tmp/B/9484aeca67984bdd841c38956c7bbe19
2024-12-16T17:57:16,163 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-0 {event_type=RS_FLUSH_REGIONS, pid=48}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/7212a7dec92fa5781081695b56d809ad/.tmp/C/861341f7f9f44e28abdb8a5628bb5e89 is 50, key is test_row_0/C:col10/1734371834565/Put/seqid=0
2024-12-16T17:57:16,170 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41817 is added to blk_1073742031_1207 (size=12151)
2024-12-16T17:57:16,298 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39733 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit.
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7212a7dec92fa5781081695b56d809ad, server=3609ad07831c,39733,1734371789085
at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?]
at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?]
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?]
at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?]
at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?]
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT]
at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT]
at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT]
at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT]
2024-12-16T17:57:16,298 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39733 {}] ipc.CallRunner(138): callId: 119 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41652 deadline: 1734371896297, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7212a7dec92fa5781081695b56d809ad, server=3609ad07831c,39733,1734371789085
2024-12-16T17:57:16,298 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39733 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit.
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7212a7dec92fa5781081695b56d809ad, server=3609ad07831c,39733,1734371789085
at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?]
at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?]
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?]
at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?]
at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?]
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT]
at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT]
at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT]
at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT]
2024-12-16T17:57:16,298 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39733 {}] ipc.CallRunner(138): callId: 125 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41642 deadline: 1734371896297, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7212a7dec92fa5781081695b56d809ad, server=3609ad07831c,39733,1734371789085
2024-12-16T17:57:16,303 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39733 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit.
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7212a7dec92fa5781081695b56d809ad, server=3609ad07831c,39733,1734371789085
at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?]
at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?]
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?]
at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?]
at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?]
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT]
at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT]
at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT]
at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT]
2024-12-16T17:57:16,303 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39733 {}] ipc.CallRunner(138): callId: 129 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41612 deadline: 1734371896302, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7212a7dec92fa5781081695b56d809ad, server=3609ad07831c,39733,1734371789085
2024-12-16T17:57:16,303 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39733 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit.
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7212a7dec92fa5781081695b56d809ad, server=3609ad07831c,39733,1734371789085
at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?]
at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?]
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?]
at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?]
at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?]
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT]
at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT]
at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT]
at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT]
2024-12-16T17:57:16,303 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39733 {}] ipc.CallRunner(138): callId: 127 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41586 deadline: 1734371896302, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7212a7dec92fa5781081695b56d809ad, server=3609ad07831c,39733,1734371789085
2024-12-16T17:57:16,307 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39733 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit.
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7212a7dec92fa5781081695b56d809ad, server=3609ad07831c,39733,1734371789085
at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?]
at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?]
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?]
at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?]
at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?]
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT]
at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT]
at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT]
at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT]
2024-12-16T17:57:16,308 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39733 {}] ipc.CallRunner(138): callId: 127 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41628 deadline: 1734371896307, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7212a7dec92fa5781081695b56d809ad, server=3609ad07831c,39733,1734371789085
2024-12-16T17:57:16,571 INFO [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-0 {event_type=RS_FLUSH_REGIONS, pid=48}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=46.96 KB at sequenceid=240 (bloomFilter=true), to=hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/7212a7dec92fa5781081695b56d809ad/.tmp/C/861341f7f9f44e28abdb8a5628bb5e89
2024-12-16T17:57:16,585 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-0 {event_type=RS_FLUSH_REGIONS, pid=48}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/7212a7dec92fa5781081695b56d809ad/.tmp/A/a69582b913cf4020b2bef655d64fe16f as hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/7212a7dec92fa5781081695b56d809ad/A/a69582b913cf4020b2bef655d64fe16f
2024-12-16T17:57:16,601 INFO [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-0 {event_type=RS_FLUSH_REGIONS, pid=48}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/7212a7dec92fa5781081695b56d809ad/A/a69582b913cf4020b2bef655d64fe16f, entries=150, sequenceid=240, filesize=30.4 K
2024-12-16T17:57:16,604 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-0 {event_type=RS_FLUSH_REGIONS, pid=48}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/7212a7dec92fa5781081695b56d809ad/.tmp/B/9484aeca67984bdd841c38956c7bbe19 as hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/7212a7dec92fa5781081695b56d809ad/B/9484aeca67984bdd841c38956c7bbe19
2024-12-16T17:57:16,615 INFO [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-0 {event_type=RS_FLUSH_REGIONS, pid=48}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/7212a7dec92fa5781081695b56d809ad/B/9484aeca67984bdd841c38956c7bbe19, entries=150, sequenceid=240, filesize=11.9 K
2024-12-16T17:57:16,617 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-0 {event_type=RS_FLUSH_REGIONS, pid=48}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/7212a7dec92fa5781081695b56d809ad/.tmp/C/861341f7f9f44e28abdb8a5628bb5e89 as hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/7212a7dec92fa5781081695b56d809ad/C/861341f7f9f44e28abdb8a5628bb5e89
2024-12-16T17:57:16,625 INFO [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-0 {event_type=RS_FLUSH_REGIONS, pid=48}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/7212a7dec92fa5781081695b56d809ad/C/861341f7f9f44e28abdb8a5628bb5e89, entries=150, sequenceid=240, filesize=11.9 K
2024-12-16T17:57:16,628 INFO [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-0 {event_type=RS_FLUSH_REGIONS, pid=48}] regionserver.HRegion(3040): Finished flush of dataSize ~140.89 KB/144270, heapSize ~369.84 KB/378720, currentSize=60.38 KB/61830 for 7212a7dec92fa5781081695b56d809ad in 984ms, sequenceid=240, compaction requested=true
2024-12-16T17:57:16,628 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-0 {event_type=RS_FLUSH_REGIONS, pid=48}] regionserver.HRegion(2538): Flush status journal for 7212a7dec92fa5781081695b56d809ad:
2024-12-16T17:57:16,628 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-0 {event_type=RS_FLUSH_REGIONS, pid=48}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1734371821088.7212a7dec92fa5781081695b56d809ad.
2024-12-16T17:57:16,628 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-0 {event_type=RS_FLUSH_REGIONS, pid=48}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=48
2024-12-16T17:57:16,629 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38367 {}] master.HMaster(4106): Remote procedure done, pid=48
2024-12-16T17:57:16,631 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=48, resume processing ppid=47
2024-12-16T17:57:16,631 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=48, ppid=47, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 1.9060 sec
2024-12-16T17:57:16,633 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=47, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=47, table=TestAcidGuarantees in 1.9100 sec
2024-12-16T17:57:16,802 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39733 {}] regionserver.HRegion(8581): Flush requested on 7212a7dec92fa5781081695b56d809ad
2024-12-16T17:57:16,803 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 7212a7dec92fa5781081695b56d809ad 3/3 column families, dataSize=67.09 KB heapSize=176.53 KB
2024-12-16T17:57:16,803 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 7212a7dec92fa5781081695b56d809ad, store=A
2024-12-16T17:57:16,803 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null
2024-12-16T17:57:16,803 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 7212a7dec92fa5781081695b56d809ad, store=B
2024-12-16T17:57:16,803 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null
2024-12-16T17:57:16,803 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 7212a7dec92fa5781081695b56d809ad, store=C
2024-12-16T17:57:16,803 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null
2024-12-16T17:57:16,813 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e2024121621c3f57590b841ba8bc4f98394ba43b6_7212a7dec92fa5781081695b56d809ad is 50, key is test_row_0/A:col10/1734371835689/Put/seqid=0
2024-12-16T17:57:16,824 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39733 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit.
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7212a7dec92fa5781081695b56d809ad, server=3609ad07831c,39733,1734371789085
at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?]
at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?]
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?]
at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?]
at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?]
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT]
at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT]
at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT]
at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT]
2024-12-16T17:57:16,824 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39733 {}] ipc.CallRunner(138): callId: 131 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41628 deadline: 1734371896820, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7212a7dec92fa5781081695b56d809ad, server=3609ad07831c,39733,1734371789085
2024-12-16T17:57:16,825 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39733 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit.
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7212a7dec92fa5781081695b56d809ad, server=3609ad07831c,39733,1734371789085
at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?]
at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?]
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?]
at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?]
at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?]
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT]
at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT]
at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT]
at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT]
2024-12-16T17:57:16,825 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39733 {}] ipc.CallRunner(138): callId: 133 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41586 deadline: 1734371896821, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7212a7dec92fa5781081695b56d809ad, server=3609ad07831c,39733,1734371789085
2024-12-16T17:57:16,825 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39733 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit.
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7212a7dec92fa5781081695b56d809ad, server=3609ad07831c,39733,1734371789085
at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?]
at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?]
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?]
at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?]
at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?]
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT]
at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT]
at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT]
at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT]
2024-12-16T17:57:16,825 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39733 {}] ipc.CallRunner(138): callId: 132 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41642 deadline: 1734371896822, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7212a7dec92fa5781081695b56d809ad, server=3609ad07831c,39733,1734371789085
2024-12-16T17:57:16,826 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39733 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit.
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7212a7dec92fa5781081695b56d809ad, server=3609ad07831c,39733,1734371789085
at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?]
at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?]
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?]
at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?]
at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?]
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT]
at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT]
at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT]
at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT]
2024-12-16T17:57:16,827 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39733 {}] ipc.CallRunner(138): callId: 135 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41612 deadline: 1734371896824, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7212a7dec92fa5781081695b56d809ad, server=3609ad07831c,39733,1734371789085
2024-12-16T17:57:16,827 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39733 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit.
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7212a7dec92fa5781081695b56d809ad, server=3609ad07831c,39733,1734371789085
at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?]
at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?]
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?]
at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?]
at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?]
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT]
at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT]
at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT]
at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT]
2024-12-16T17:57:16,827 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39733 {}] ipc.CallRunner(138): callId: 127 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41652 deadline: 1734371896824, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7212a7dec92fa5781081695b56d809ad, server=3609ad07831c,39733,1734371789085
2024-12-16T17:57:16,829 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38367 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=47
2024-12-16T17:57:16,830 INFO [Thread-705 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 47 completed
2024-12-16T17:57:16,831 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38367 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees
2024-12-16T17:57:16,831 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38367 {}] procedure2.ProcedureExecutor(1098): Stored pid=49, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=49, table=TestAcidGuarantees
2024-12-16T17:57:16,832 INFO [PEWorker-1 {}] procedure.FlushTableProcedure(91): pid=49, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=49, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE
2024-12-16T17:57:16,832 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38367 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=49
2024-12-16T17:57:16,833 INFO [PEWorker-1 {}] procedure.FlushTableProcedure(91): pid=49, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=49, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS
2024-12-16T17:57:16,833 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=50, ppid=49, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}]
2024-12-16T17:57:16,835 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41817 is added to blk_1073742032_1208 (size=14794)
2024-12-16T17:57:16,836 DEBUG [MemStoreFlusher.0 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-12-16T17:57:16,841 INFO [MemStoreFlusher.0 {}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e2024121621c3f57590b841ba8bc4f98394ba43b6_7212a7dec92fa5781081695b56d809ad to hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e2024121621c3f57590b841ba8bc4f98394ba43b6_7212a7dec92fa5781081695b56d809ad
2024-12-16T17:57:16,844 DEBUG [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/7212a7dec92fa5781081695b56d809ad/.tmp/A/519028bdec9445bd8dfb12c201548b5a, store: [table=TestAcidGuarantees family=A region=7212a7dec92fa5781081695b56d809ad]
2024-12-16T17:57:16,844 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/7212a7dec92fa5781081695b56d809ad/.tmp/A/519028bdec9445bd8dfb12c201548b5a is 175, key is test_row_0/A:col10/1734371835689/Put/seqid=0
2024-12-16T17:57:16,850 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41817 is added to blk_1073742033_1209 (size=39749)
2024-12-16T17:57:16,928 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39733 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit.
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7212a7dec92fa5781081695b56d809ad, server=3609ad07831c,39733,1734371789085
at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?]
at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?]
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?]
at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?]
at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?]
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT]
at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT]
at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT]
at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT]
2024-12-16T17:57:16,928 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39733 {}] ipc.CallRunner(138): callId: 133 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41628 deadline: 1734371896925, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7212a7dec92fa5781081695b56d809ad, server=3609ad07831c,39733,1734371789085
2024-12-16T17:57:16,928 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39733 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit.
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7212a7dec92fa5781081695b56d809ad, server=3609ad07831c,39733,1734371789085
at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?]
at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?]
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?]
at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?]
at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?]
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT]
at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT]
at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT]
at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT]
2024-12-16T17:57:16,928 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39733 {}] ipc.CallRunner(138): callId: 134 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41642 deadline: 1734371896925, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7212a7dec92fa5781081695b56d809ad, server=3609ad07831c,39733,1734371789085
2024-12-16T17:57:16,928 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39733 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit.
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7212a7dec92fa5781081695b56d809ad, server=3609ad07831c,39733,1734371789085
at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?]
at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?]
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?]
at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?]
at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?]
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT]
at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT]
at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT]
at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT]
2024-12-16T17:57:16,929 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39733 {}] ipc.CallRunner(138): callId: 135 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41586 deadline: 1734371896925, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7212a7dec92fa5781081695b56d809ad, server=3609ad07831c,39733,1734371789085
2024-12-16T17:57:16,929 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39733 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit.
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7212a7dec92fa5781081695b56d809ad, server=3609ad07831c,39733,1734371789085
at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?]
at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?]
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?]
at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?]
at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?]
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT]
at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT]
at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT]
at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT]
2024-12-16T17:57:16,929 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39733 {}] ipc.CallRunner(138): callId: 137 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41612 deadline: 1734371896927, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7212a7dec92fa5781081695b56d809ad, server=3609ad07831c,39733,1734371789085
2024-12-16T17:57:16,929 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39733 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit.
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7212a7dec92fa5781081695b56d809ad, server=3609ad07831c,39733,1734371789085
at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?]
at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?]
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?]
at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?]
at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?]
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT]
at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT]
at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT]
at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT]
2024-12-16T17:57:16,930 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39733 {}] ipc.CallRunner(138): callId: 129 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41652 deadline: 1734371896928, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7212a7dec92fa5781081695b56d809ad, server=3609ad07831c,39733,1734371789085
2024-12-16T17:57:16,933 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38367 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=49
2024-12-16T17:57:16,984 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 3609ad07831c,39733,1734371789085
2024-12-16T17:57:16,985 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=39733 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=50
2024-12-16T17:57:16,985 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-1 {event_type=RS_FLUSH_REGIONS, pid=50}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1734371821088.7212a7dec92fa5781081695b56d809ad.
2024-12-16T17:57:16,985 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-1 {event_type=RS_FLUSH_REGIONS, pid=50}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1734371821088.7212a7dec92fa5781081695b56d809ad. as already flushing
2024-12-16T17:57:16,985 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-1 {event_type=RS_FLUSH_REGIONS, pid=50}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1734371821088.7212a7dec92fa5781081695b56d809ad.
2024-12-16T17:57:16,985 ERROR [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-1 {event_type=RS_FLUSH_REGIONS, pid=50}] handler.RSProcedureHandler(58): pid=50
java.io.IOException: Unable to complete flush {ENCODED => 7212a7dec92fa5781081695b56d809ad, NAME => 'TestAcidGuarantees,,1734371821088.7212a7dec92fa5781081695b56d809ad.', STARTKEY => '', ENDKEY => ''}
at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?]
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT]
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT]
at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?]
at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?]
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
at java.lang.Thread.run(Thread.java:840) ~[?:?]
2024-12-16T17:57:16,985 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-1 {event_type=RS_FLUSH_REGIONS, pid=50}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=50
java.io.IOException: Unable to complete flush {ENCODED => 7212a7dec92fa5781081695b56d809ad, NAME => 'TestAcidGuarantees,,1734371821088.7212a7dec92fa5781081695b56d809ad.', STARTKEY => '', ENDKEY => ''}
at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?]
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT]
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT]
at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?]
at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?]
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
at java.lang.Thread.run(Thread.java:840) ~[?:?]
2024-12-16T17:57:16,986 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38367 {}] master.HMaster(4114): Remote procedure failed, pid=50
org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 7212a7dec92fa5781081695b56d809ad, NAME => 'TestAcidGuarantees,,1734371821088.7212a7dec92fa5781081695b56d809ad.', STARTKEY => '', ENDKEY => ''}
at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?]
at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?]
at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?]
at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?]
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT]
at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT]
at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT]
at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT]
Caused by: java.io.IOException: Unable to complete flush {ENCODED => 7212a7dec92fa5781081695b56d809ad, NAME => 'TestAcidGuarantees,,1734371821088.7212a7dec92fa5781081695b56d809ad.', STARTKEY => '', ENDKEY => ''}
at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?]
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT]
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT]
at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?]
at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?]
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
at java.lang.Thread.run(Thread.java:840) ~[?:?]
2024-12-16T17:57:17,133 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39733 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit.
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7212a7dec92fa5781081695b56d809ad, server=3609ad07831c,39733,1734371789085
at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?]
at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?]
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?]
at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?]
at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?]
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT]
at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT]
at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT]
at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT]
2024-12-16T17:57:17,133 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39733 {}] ipc.CallRunner(138): callId: 137 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41586 deadline: 1734371897129, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7212a7dec92fa5781081695b56d809ad, server=3609ad07831c,39733,1734371789085
2024-12-16T17:57:17,133 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39733 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit.
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7212a7dec92fa5781081695b56d809ad, server=3609ad07831c,39733,1734371789085
at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?]
at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?]
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?]
at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?]
at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?]
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT]
at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT]
at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT]
at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT]
2024-12-16T17:57:17,133 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39733 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit.
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7212a7dec92fa5781081695b56d809ad, server=3609ad07831c,39733,1734371789085
at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?]
at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?]
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?]
at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?]
at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?]
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT]
at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT]
at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT]
at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT]
2024-12-16T17:57:17,133 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39733 {}] ipc.CallRunner(138): callId: 135 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41628 deadline: 1734371897130, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7212a7dec92fa5781081695b56d809ad, server=3609ad07831c,39733,1734371789085
2024-12-16T17:57:17,133 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39733 {}] ipc.CallRunner(138): callId: 136 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41642 deadline: 1734371897130, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7212a7dec92fa5781081695b56d809ad, server=3609ad07831c,39733,1734371789085
2024-12-16T17:57:17,133 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39733 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit.
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7212a7dec92fa5781081695b56d809ad, server=3609ad07831c,39733,1734371789085
at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?]
at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?]
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?]
at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?]
at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?]
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT]
at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT]
at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT]
at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT]
2024-12-16T17:57:17,133 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39733 {}] ipc.CallRunner(138): callId: 131 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41652 deadline: 1734371897130, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7212a7dec92fa5781081695b56d809ad, server=3609ad07831c,39733,1734371789085
2024-12-16T17:57:17,134 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39733 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit.
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7212a7dec92fa5781081695b56d809ad, server=3609ad07831c,39733,1734371789085
at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?]
at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?]
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?]
at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?]
at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?]
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT]
at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT]
at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT]
at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT]
2024-12-16T17:57:17,134 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39733 {}] ipc.CallRunner(138): callId: 139 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41612 deadline: 1734371897131, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7212a7dec92fa5781081695b56d809ad, server=3609ad07831c,39733,1734371789085
2024-12-16T17:57:17,134 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38367 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=49
2024-12-16T17:57:17,137 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 3609ad07831c,39733,1734371789085
2024-12-16T17:57:17,137 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=39733 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=50
2024-12-16T17:57:17,137 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-2 {event_type=RS_FLUSH_REGIONS, pid=50}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1734371821088.7212a7dec92fa5781081695b56d809ad.
2024-12-16T17:57:17,138 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-2 {event_type=RS_FLUSH_REGIONS, pid=50}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1734371821088.7212a7dec92fa5781081695b56d809ad. as already flushing
2024-12-16T17:57:17,138 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-2 {event_type=RS_FLUSH_REGIONS, pid=50}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1734371821088.7212a7dec92fa5781081695b56d809ad.
2024-12-16T17:57:17,138 ERROR [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-2 {event_type=RS_FLUSH_REGIONS, pid=50}] handler.RSProcedureHandler(58): pid=50
java.io.IOException: Unable to complete flush {ENCODED => 7212a7dec92fa5781081695b56d809ad, NAME => 'TestAcidGuarantees,,1734371821088.7212a7dec92fa5781081695b56d809ad.', STARTKEY => '', ENDKEY => ''}
at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?]
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-16T17:57:17,138 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-2 {event_type=RS_FLUSH_REGIONS, pid=50}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=50 java.io.IOException: Unable to complete flush {ENCODED => 7212a7dec92fa5781081695b56d809ad, NAME => 'TestAcidGuarantees,,1734371821088.7212a7dec92fa5781081695b56d809ad.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-16T17:57:17,138 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38367 {}] master.HMaster(4114): Remote procedure failed, pid=50 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 7212a7dec92fa5781081695b56d809ad, NAME => 'TestAcidGuarantees,,1734371821088.7212a7dec92fa5781081695b56d809ad.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 7212a7dec92fa5781081695b56d809ad, NAME => 'TestAcidGuarantees,,1734371821088.7212a7dec92fa5781081695b56d809ad.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-16T17:57:17,251 INFO [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=253, memsize=22.4 K, hasBloomFilter=true, into tmp file hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/7212a7dec92fa5781081695b56d809ad/.tmp/A/519028bdec9445bd8dfb12c201548b5a 2024-12-16T17:57:17,258 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/7212a7dec92fa5781081695b56d809ad/.tmp/B/aa3c0c1d37e3495a9df5e9babce26c7f is 50, key is test_row_0/B:col10/1734371835689/Put/seqid=0 2024-12-16T17:57:17,268 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41817 is added to blk_1073742034_1210 (size=12151) 2024-12-16T17:57:17,290 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 3609ad07831c,39733,1734371789085 2024-12-16T17:57:17,290 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=39733 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=50 2024-12-16T17:57:17,294 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-0 {event_type=RS_FLUSH_REGIONS, pid=50}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1734371821088.7212a7dec92fa5781081695b56d809ad. 2024-12-16T17:57:17,294 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-0 {event_type=RS_FLUSH_REGIONS, pid=50}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1734371821088.7212a7dec92fa5781081695b56d809ad. 
as already flushing 2024-12-16T17:57:17,294 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-0 {event_type=RS_FLUSH_REGIONS, pid=50}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1734371821088.7212a7dec92fa5781081695b56d809ad. 2024-12-16T17:57:17,294 ERROR [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-0 {event_type=RS_FLUSH_REGIONS, pid=50}] handler.RSProcedureHandler(58): pid=50 java.io.IOException: Unable to complete flush {ENCODED => 7212a7dec92fa5781081695b56d809ad, NAME => 'TestAcidGuarantees,,1734371821088.7212a7dec92fa5781081695b56d809ad.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-16T17:57:17,295 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-0 {event_type=RS_FLUSH_REGIONS, pid=50}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=50 java.io.IOException: Unable to complete flush {ENCODED => 7212a7dec92fa5781081695b56d809ad, NAME => 'TestAcidGuarantees,,1734371821088.7212a7dec92fa5781081695b56d809ad.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-16T17:57:17,295 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38367 {}] master.HMaster(4114): Remote procedure failed, pid=50 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 7212a7dec92fa5781081695b56d809ad, NAME => 'TestAcidGuarantees,,1734371821088.7212a7dec92fa5781081695b56d809ad.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] 
at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 7212a7dec92fa5781081695b56d809ad, NAME => 'TestAcidGuarantees,,1734371821088.7212a7dec92fa5781081695b56d809ad.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-16T17:57:17,434 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39733 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7212a7dec92fa5781081695b56d809ad, server=3609ad07831c,39733,1734371789085 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-16T17:57:17,434 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39733 {}] ipc.CallRunner(138): callId: 137 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41628 deadline: 1734371897434, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7212a7dec92fa5781081695b56d809ad, server=3609ad07831c,39733,1734371789085 2024-12-16T17:57:17,435 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38367 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=49 2024-12-16T17:57:17,436 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39733 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7212a7dec92fa5781081695b56d809ad, server=3609ad07831c,39733,1734371789085 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-16T17:57:17,437 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39733 {}] ipc.CallRunner(138): callId: 138 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41642 deadline: 1734371897435, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7212a7dec92fa5781081695b56d809ad, server=3609ad07831c,39733,1734371789085 2024-12-16T17:57:17,437 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39733 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7212a7dec92fa5781081695b56d809ad, server=3609ad07831c,39733,1734371789085 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-16T17:57:17,437 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39733 {}] ipc.CallRunner(138): callId: 141 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41612 deadline: 1734371897435, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7212a7dec92fa5781081695b56d809ad, server=3609ad07831c,39733,1734371789085 2024-12-16T17:57:17,438 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39733 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7212a7dec92fa5781081695b56d809ad, server=3609ad07831c,39733,1734371789085 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-16T17:57:17,438 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39733 {}] ipc.CallRunner(138): callId: 139 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41586 deadline: 1734371897436, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7212a7dec92fa5781081695b56d809ad, server=3609ad07831c,39733,1734371789085 2024-12-16T17:57:17,438 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39733 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7212a7dec92fa5781081695b56d809ad, server=3609ad07831c,39733,1734371789085 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-16T17:57:17,438 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39733 {}] ipc.CallRunner(138): callId: 133 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41652 deadline: 1734371897436, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7212a7dec92fa5781081695b56d809ad, server=3609ad07831c,39733,1734371789085 2024-12-16T17:57:17,447 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 3609ad07831c,39733,1734371789085 2024-12-16T17:57:17,447 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=39733 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=50 2024-12-16T17:57:17,447 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-1 {event_type=RS_FLUSH_REGIONS, pid=50}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1734371821088.7212a7dec92fa5781081695b56d809ad. 2024-12-16T17:57:17,447 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-1 {event_type=RS_FLUSH_REGIONS, pid=50}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1734371821088.7212a7dec92fa5781081695b56d809ad. as already flushing 2024-12-16T17:57:17,447 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-1 {event_type=RS_FLUSH_REGIONS, pid=50}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1734371821088.7212a7dec92fa5781081695b56d809ad. 2024-12-16T17:57:17,447 ERROR [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-1 {event_type=RS_FLUSH_REGIONS, pid=50}] handler.RSProcedureHandler(58): pid=50 java.io.IOException: Unable to complete flush {ENCODED => 7212a7dec92fa5781081695b56d809ad, NAME => 'TestAcidGuarantees,,1734371821088.7212a7dec92fa5781081695b56d809ad.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-16T17:57:17,447 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-1 {event_type=RS_FLUSH_REGIONS, pid=50}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=50 java.io.IOException: Unable to complete flush {ENCODED => 7212a7dec92fa5781081695b56d809ad, NAME => 'TestAcidGuarantees,,1734371821088.7212a7dec92fa5781081695b56d809ad.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-16T17:57:17,448 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38367 {}] master.HMaster(4114): Remote procedure failed, pid=50 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 7212a7dec92fa5781081695b56d809ad, NAME => 'TestAcidGuarantees,,1734371821088.7212a7dec92fa5781081695b56d809ad.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 7212a7dec92fa5781081695b56d809ad, NAME => 'TestAcidGuarantees,,1734371821088.7212a7dec92fa5781081695b56d809ad.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-16T17:57:17,599 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 3609ad07831c,39733,1734371789085 2024-12-16T17:57:17,599 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=39733 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=50 2024-12-16T17:57:17,600 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-2 {event_type=RS_FLUSH_REGIONS, pid=50}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1734371821088.7212a7dec92fa5781081695b56d809ad. 2024-12-16T17:57:17,600 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-2 {event_type=RS_FLUSH_REGIONS, pid=50}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1734371821088.7212a7dec92fa5781081695b56d809ad. as already flushing 2024-12-16T17:57:17,600 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-2 {event_type=RS_FLUSH_REGIONS, pid=50}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1734371821088.7212a7dec92fa5781081695b56d809ad. 2024-12-16T17:57:17,600 ERROR [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-2 {event_type=RS_FLUSH_REGIONS, pid=50}] handler.RSProcedureHandler(58): pid=50 java.io.IOException: Unable to complete flush {ENCODED => 7212a7dec92fa5781081695b56d809ad, NAME => 'TestAcidGuarantees,,1734371821088.7212a7dec92fa5781081695b56d809ad.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-16T17:57:17,600 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-2 {event_type=RS_FLUSH_REGIONS, pid=50}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=50 java.io.IOException: Unable to complete flush {ENCODED => 7212a7dec92fa5781081695b56d809ad, NAME => 'TestAcidGuarantees,,1734371821088.7212a7dec92fa5781081695b56d809ad.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-16T17:57:17,601 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38367 {}] master.HMaster(4114): Remote procedure failed, pid=50 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 7212a7dec92fa5781081695b56d809ad, NAME => 'TestAcidGuarantees,,1734371821088.7212a7dec92fa5781081695b56d809ad.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 7212a7dec92fa5781081695b56d809ad, NAME => 'TestAcidGuarantees,,1734371821088.7212a7dec92fa5781081695b56d809ad.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-16T17:57:17,669 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=22.36 KB at sequenceid=253 (bloomFilter=true), to=hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/7212a7dec92fa5781081695b56d809ad/.tmp/B/aa3c0c1d37e3495a9df5e9babce26c7f 2024-12-16T17:57:17,676 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/7212a7dec92fa5781081695b56d809ad/.tmp/C/652d941856074f2fbcc3a31ef89f1cdf is 50, key is test_row_0/C:col10/1734371835689/Put/seqid=0 2024-12-16T17:57:17,679 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41817 is added to blk_1073742035_1211 (size=12151) 2024-12-16T17:57:17,752 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 3609ad07831c,39733,1734371789085 2024-12-16T17:57:17,752 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=39733 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=50 2024-12-16T17:57:17,752 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-0 {event_type=RS_FLUSH_REGIONS, pid=50}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1734371821088.7212a7dec92fa5781081695b56d809ad. 2024-12-16T17:57:17,753 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-0 {event_type=RS_FLUSH_REGIONS, pid=50}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1734371821088.7212a7dec92fa5781081695b56d809ad. 
as already flushing 2024-12-16T17:57:17,753 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-0 {event_type=RS_FLUSH_REGIONS, pid=50}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1734371821088.7212a7dec92fa5781081695b56d809ad. 2024-12-16T17:57:17,753 ERROR [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-0 {event_type=RS_FLUSH_REGIONS, pid=50}] handler.RSProcedureHandler(58): pid=50 java.io.IOException: Unable to complete flush {ENCODED => 7212a7dec92fa5781081695b56d809ad, NAME => 'TestAcidGuarantees,,1734371821088.7212a7dec92fa5781081695b56d809ad.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-16T17:57:17,753 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-0 {event_type=RS_FLUSH_REGIONS, pid=50}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=50 java.io.IOException: Unable to complete flush {ENCODED => 7212a7dec92fa5781081695b56d809ad, NAME => 'TestAcidGuarantees,,1734371821088.7212a7dec92fa5781081695b56d809ad.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-16T17:57:17,753 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38367 {}] master.HMaster(4114): Remote procedure failed, pid=50 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 7212a7dec92fa5781081695b56d809ad, NAME => 'TestAcidGuarantees,,1734371821088.7212a7dec92fa5781081695b56d809ad.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] 
at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 7212a7dec92fa5781081695b56d809ad, NAME => 'TestAcidGuarantees,,1734371821088.7212a7dec92fa5781081695b56d809ad.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-16T17:57:17,905 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 3609ad07831c,39733,1734371789085 2024-12-16T17:57:17,905 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=39733 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=50 2024-12-16T17:57:17,905 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-1 {event_type=RS_FLUSH_REGIONS, pid=50}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1734371821088.7212a7dec92fa5781081695b56d809ad. 2024-12-16T17:57:17,906 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-1 {event_type=RS_FLUSH_REGIONS, pid=50}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1734371821088.7212a7dec92fa5781081695b56d809ad. as already flushing 2024-12-16T17:57:17,906 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-1 {event_type=RS_FLUSH_REGIONS, pid=50}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1734371821088.7212a7dec92fa5781081695b56d809ad. 2024-12-16T17:57:17,906 ERROR [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-1 {event_type=RS_FLUSH_REGIONS, pid=50}] handler.RSProcedureHandler(58): pid=50 java.io.IOException: Unable to complete flush {ENCODED => 7212a7dec92fa5781081695b56d809ad, NAME => 'TestAcidGuarantees,,1734371821088.7212a7dec92fa5781081695b56d809ad.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-16T17:57:17,906 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-1 {event_type=RS_FLUSH_REGIONS, pid=50}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=50 java.io.IOException: Unable to complete flush {ENCODED => 7212a7dec92fa5781081695b56d809ad, NAME => 'TestAcidGuarantees,,1734371821088.7212a7dec92fa5781081695b56d809ad.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-16T17:57:17,906 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38367 {}] master.HMaster(4114): Remote procedure failed, pid=50 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 7212a7dec92fa5781081695b56d809ad, NAME => 'TestAcidGuarantees,,1734371821088.7212a7dec92fa5781081695b56d809ad.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 7212a7dec92fa5781081695b56d809ad, NAME => 'TestAcidGuarantees,,1734371821088.7212a7dec92fa5781081695b56d809ad.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-16T17:57:17,935 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38367 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=49 2024-12-16T17:57:17,939 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39733 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7212a7dec92fa5781081695b56d809ad, server=3609ad07831c,39733,1734371789085 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-16T17:57:17,940 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39733 {}] ipc.CallRunner(138): callId: 141 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41586 deadline: 1734371897939, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7212a7dec92fa5781081695b56d809ad, server=3609ad07831c,39733,1734371789085 2024-12-16T17:57:17,940 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39733 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7212a7dec92fa5781081695b56d809ad, server=3609ad07831c,39733,1734371789085 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-16T17:57:17,940 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39733 {}] ipc.CallRunner(138): callId: 139 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41628 deadline: 1734371897939, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7212a7dec92fa5781081695b56d809ad, server=3609ad07831c,39733,1734371789085 2024-12-16T17:57:17,943 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39733 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7212a7dec92fa5781081695b56d809ad, server=3609ad07831c,39733,1734371789085 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-16T17:57:17,943 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39733 {}] ipc.CallRunner(138): callId: 143 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41612 deadline: 1734371897939, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7212a7dec92fa5781081695b56d809ad, server=3609ad07831c,39733,1734371789085 2024-12-16T17:57:17,943 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39733 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7212a7dec92fa5781081695b56d809ad, server=3609ad07831c,39733,1734371789085 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-16T17:57:17,943 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39733 {}] ipc.CallRunner(138): callId: 135 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41652 deadline: 1734371897941, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7212a7dec92fa5781081695b56d809ad, server=3609ad07831c,39733,1734371789085 2024-12-16T17:57:17,943 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39733 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7212a7dec92fa5781081695b56d809ad, server=3609ad07831c,39733,1734371789085 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-16T17:57:17,943 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39733 {}] ipc.CallRunner(138): callId: 140 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41642 deadline: 1734371897941, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7212a7dec92fa5781081695b56d809ad, server=3609ad07831c,39733,1734371789085 2024-12-16T17:57:18,058 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 3609ad07831c,39733,1734371789085 2024-12-16T17:57:18,058 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=39733 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=50 2024-12-16T17:57:18,058 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-2 {event_type=RS_FLUSH_REGIONS, pid=50}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1734371821088.7212a7dec92fa5781081695b56d809ad. 2024-12-16T17:57:18,058 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-2 {event_type=RS_FLUSH_REGIONS, pid=50}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1734371821088.7212a7dec92fa5781081695b56d809ad. as already flushing 2024-12-16T17:57:18,058 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-2 {event_type=RS_FLUSH_REGIONS, pid=50}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1734371821088.7212a7dec92fa5781081695b56d809ad. 2024-12-16T17:57:18,058 ERROR [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-2 {event_type=RS_FLUSH_REGIONS, pid=50}] handler.RSProcedureHandler(58): pid=50 java.io.IOException: Unable to complete flush {ENCODED => 7212a7dec92fa5781081695b56d809ad, NAME => 'TestAcidGuarantees,,1734371821088.7212a7dec92fa5781081695b56d809ad.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-16T17:57:18,059 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-2 {event_type=RS_FLUSH_REGIONS, pid=50}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=50 java.io.IOException: Unable to complete flush {ENCODED => 7212a7dec92fa5781081695b56d809ad, NAME => 'TestAcidGuarantees,,1734371821088.7212a7dec92fa5781081695b56d809ad.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-16T17:57:18,059 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38367 {}] master.HMaster(4114): Remote procedure failed, pid=50 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 7212a7dec92fa5781081695b56d809ad, NAME => 'TestAcidGuarantees,,1734371821088.7212a7dec92fa5781081695b56d809ad.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 7212a7dec92fa5781081695b56d809ad, NAME => 'TestAcidGuarantees,,1734371821088.7212a7dec92fa5781081695b56d809ad.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-16T17:57:18,080 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=22.36 KB at sequenceid=253 (bloomFilter=true), to=hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/7212a7dec92fa5781081695b56d809ad/.tmp/C/652d941856074f2fbcc3a31ef89f1cdf 2024-12-16T17:57:18,085 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/7212a7dec92fa5781081695b56d809ad/.tmp/A/519028bdec9445bd8dfb12c201548b5a as hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/7212a7dec92fa5781081695b56d809ad/A/519028bdec9445bd8dfb12c201548b5a 2024-12-16T17:57:18,089 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/7212a7dec92fa5781081695b56d809ad/A/519028bdec9445bd8dfb12c201548b5a, entries=200, sequenceid=253, filesize=38.8 K 2024-12-16T17:57:18,090 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/7212a7dec92fa5781081695b56d809ad/.tmp/B/aa3c0c1d37e3495a9df5e9babce26c7f as hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/7212a7dec92fa5781081695b56d809ad/B/aa3c0c1d37e3495a9df5e9babce26c7f 2024-12-16T17:57:18,094 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/7212a7dec92fa5781081695b56d809ad/B/aa3c0c1d37e3495a9df5e9babce26c7f, entries=150, 
sequenceid=253, filesize=11.9 K 2024-12-16T17:57:18,096 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/7212a7dec92fa5781081695b56d809ad/.tmp/C/652d941856074f2fbcc3a31ef89f1cdf as hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/7212a7dec92fa5781081695b56d809ad/C/652d941856074f2fbcc3a31ef89f1cdf 2024-12-16T17:57:18,100 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/7212a7dec92fa5781081695b56d809ad/C/652d941856074f2fbcc3a31ef89f1cdf, entries=150, sequenceid=253, filesize=11.9 K 2024-12-16T17:57:18,101 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~67.09 KB/68700, heapSize ~176.48 KB/180720, currentSize=134.18 KB/137400 for 7212a7dec92fa5781081695b56d809ad in 1299ms, sequenceid=253, compaction requested=true 2024-12-16T17:57:18,101 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 7212a7dec92fa5781081695b56d809ad: 2024-12-16T17:57:18,102 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 7212a7dec92fa5781081695b56d809ad:A, priority=-2147483648, current under compaction store size is 1 2024-12-16T17:57:18,102 DEBUG [RS:0;3609ad07831c:39733-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 4 store files, 0 compacting, 4 eligible, 16 blocking 2024-12-16T17:57:18,102 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-16T17:57:18,102 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 7212a7dec92fa5781081695b56d809ad:B, priority=-2147483648, current under compaction store size is 2 2024-12-16T17:57:18,102 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-16T17:57:18,102 DEBUG [RS:0;3609ad07831c:39733-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 4 store files, 0 compacting, 4 eligible, 16 blocking 2024-12-16T17:57:18,102 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 7212a7dec92fa5781081695b56d809ad:C, priority=-2147483648, current under compaction store size is 3 2024-12-16T17:57:18,102 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-16T17:57:18,103 DEBUG [RS:0;3609ad07831c:39733-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 4 files of size 49048 starting at candidate #0 after considering 3 permutations with 3 in ratio 2024-12-16T17:57:18,103 DEBUG [RS:0;3609ad07831c:39733-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 4 files of size 150792 starting at candidate #0 after considering 3 permutations with 3 in ratio 2024-12-16T17:57:18,103 DEBUG [RS:0;3609ad07831c:39733-shortCompactions-0 {}] 
regionserver.HStore(1540): 7212a7dec92fa5781081695b56d809ad/A is initiating minor compaction (all files) 2024-12-16T17:57:18,103 DEBUG [RS:0;3609ad07831c:39733-longCompactions-0 {}] regionserver.HStore(1540): 7212a7dec92fa5781081695b56d809ad/B is initiating minor compaction (all files) 2024-12-16T17:57:18,103 INFO [RS:0;3609ad07831c:39733-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 7212a7dec92fa5781081695b56d809ad/B in TestAcidGuarantees,,1734371821088.7212a7dec92fa5781081695b56d809ad. 2024-12-16T17:57:18,103 INFO [RS:0;3609ad07831c:39733-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 7212a7dec92fa5781081695b56d809ad/A in TestAcidGuarantees,,1734371821088.7212a7dec92fa5781081695b56d809ad. 2024-12-16T17:57:18,103 INFO [RS:0;3609ad07831c:39733-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/7212a7dec92fa5781081695b56d809ad/B/b4f5a30590d74fc1903956c55d525b69, hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/7212a7dec92fa5781081695b56d809ad/B/e90482b51c3849688b30b606560170bf, hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/7212a7dec92fa5781081695b56d809ad/B/9484aeca67984bdd841c38956c7bbe19, hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/7212a7dec92fa5781081695b56d809ad/B/aa3c0c1d37e3495a9df5e9babce26c7f] into tmpdir=hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/7212a7dec92fa5781081695b56d809ad/.tmp, totalSize=47.9 K 2024-12-16T17:57:18,103 INFO [RS:0;3609ad07831c:39733-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/7212a7dec92fa5781081695b56d809ad/A/93958fdf6d434341a6b645a85f56509f, hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/7212a7dec92fa5781081695b56d809ad/A/bd5a3a1af6294e16b24e0c88ad4711b8, hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/7212a7dec92fa5781081695b56d809ad/A/a69582b913cf4020b2bef655d64fe16f, hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/7212a7dec92fa5781081695b56d809ad/A/519028bdec9445bd8dfb12c201548b5a] into tmpdir=hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/7212a7dec92fa5781081695b56d809ad/.tmp, totalSize=147.3 K 2024-12-16T17:57:18,103 INFO [RS:0;3609ad07831c:39733-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(181): MOB compaction: major=false isAll=true priority=12 throughput controller=DefaultCompactionThroughputController [maxThroughput=50.00 MB/second, activeCompactions=0] table=TestAcidGuarantees cf=A region=TestAcidGuarantees,,1734371821088.7212a7dec92fa5781081695b56d809ad. 2024-12-16T17:57:18,103 DEBUG [RS:0;3609ad07831c:39733-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(191): MOB compaction table=TestAcidGuarantees cf=A region=TestAcidGuarantees,,1734371821088.7212a7dec92fa5781081695b56d809ad. 
files: [hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/7212a7dec92fa5781081695b56d809ad/A/93958fdf6d434341a6b645a85f56509f, hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/7212a7dec92fa5781081695b56d809ad/A/bd5a3a1af6294e16b24e0c88ad4711b8, hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/7212a7dec92fa5781081695b56d809ad/A/a69582b913cf4020b2bef655d64fe16f, hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/7212a7dec92fa5781081695b56d809ad/A/519028bdec9445bd8dfb12c201548b5a] 2024-12-16T17:57:18,104 DEBUG [RS:0;3609ad07831c:39733-longCompactions-0 {}] compactions.Compactor(224): Compacting b4f5a30590d74fc1903956c55d525b69, keycount=150, bloomtype=ROW, size=12.3 K, encoding=NONE, compression=NONE, seqNum=201, earliestPutTs=1734371833176 2024-12-16T17:57:18,104 DEBUG [RS:0;3609ad07831c:39733-shortCompactions-0 {}] compactions.Compactor(224): Compacting 93958fdf6d434341a6b645a85f56509f, keycount=150, bloomtype=ROW, size=30.8 K, encoding=NONE, compression=NONE, seqNum=201, earliestPutTs=1734371833176 2024-12-16T17:57:18,104 DEBUG [RS:0;3609ad07831c:39733-longCompactions-0 {}] compactions.Compactor(224): Compacting e90482b51c3849688b30b606560170bf, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=213, earliestPutTs=1734371834334 2024-12-16T17:57:18,104 DEBUG [RS:0;3609ad07831c:39733-shortCompactions-0 {}] compactions.Compactor(224): Compacting bd5a3a1af6294e16b24e0c88ad4711b8, keycount=250, bloomtype=ROW, size=47.3 K, encoding=NONE, compression=NONE, seqNum=213, earliestPutTs=1734371834331 2024-12-16T17:57:18,104 DEBUG [RS:0;3609ad07831c:39733-longCompactions-0 {}] compactions.Compactor(224): Compacting 9484aeca67984bdd841c38956c7bbe19, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=240, earliestPutTs=1734371834553 2024-12-16T17:57:18,105 DEBUG [RS:0;3609ad07831c:39733-longCompactions-0 {}] compactions.Compactor(224): Compacting aa3c0c1d37e3495a9df5e9babce26c7f, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=253, earliestPutTs=1734371835688 2024-12-16T17:57:18,105 DEBUG [RS:0;3609ad07831c:39733-shortCompactions-0 {}] compactions.Compactor(224): Compacting a69582b913cf4020b2bef655d64fe16f, keycount=150, bloomtype=ROW, size=30.4 K, encoding=NONE, compression=NONE, seqNum=240, earliestPutTs=1734371834553 2024-12-16T17:57:18,106 DEBUG [RS:0;3609ad07831c:39733-shortCompactions-0 {}] compactions.Compactor(224): Compacting 519028bdec9445bd8dfb12c201548b5a, keycount=200, bloomtype=ROW, size=38.8 K, encoding=NONE, compression=NONE, seqNum=253, earliestPutTs=1734371835688 2024-12-16T17:57:18,113 INFO [RS:0;3609ad07831c:39733-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 7212a7dec92fa5781081695b56d809ad#B#compaction#177 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-12-16T17:57:18,113 INFO [RS:0;3609ad07831c:39733-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(322): Compact MOB=false optimized configured=false optimized enabled=false maximum MOB file size=1073741824 major=true store=[table=TestAcidGuarantees family=A region=7212a7dec92fa5781081695b56d809ad] 2024-12-16T17:57:18,113 DEBUG [RS:0;3609ad07831c:39733-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/7212a7dec92fa5781081695b56d809ad/.tmp/B/27377419172649f9b2ee205fef33d10e is 50, key is test_row_0/B:col10/1734371835689/Put/seqid=0 2024-12-16T17:57:18,117 DEBUG [RS:0;3609ad07831c:39733-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(626): New MOB writer created=d41d8cd98f00b204e9800998ecf8427e2024121687f6c1154cd74a879449443fd8667b70_7212a7dec92fa5781081695b56d809ad store=[table=TestAcidGuarantees family=A region=7212a7dec92fa5781081695b56d809ad] 2024-12-16T17:57:18,119 DEBUG [RS:0;3609ad07831c:39733-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(647): Commit or abort size=0 mobCells=0 major=true file=d41d8cd98f00b204e9800998ecf8427e2024121687f6c1154cd74a879449443fd8667b70_7212a7dec92fa5781081695b56d809ad, store=[table=TestAcidGuarantees family=A region=7212a7dec92fa5781081695b56d809ad] 2024-12-16T17:57:18,119 DEBUG [RS:0;3609ad07831c:39733-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(658): Aborting writer for hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e2024121687f6c1154cd74a879449443fd8667b70_7212a7dec92fa5781081695b56d809ad because there are no MOB cells, store=[table=TestAcidGuarantees family=A region=7212a7dec92fa5781081695b56d809ad] 2024-12-16T17:57:18,120 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41817 is added to blk_1073742036_1212 (size=12731) 2024-12-16T17:57:18,125 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41817 is added to blk_1073742037_1213 (size=4469) 2024-12-16T17:57:18,211 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 3609ad07831c,39733,1734371789085 2024-12-16T17:57:18,211 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=39733 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=50 2024-12-16T17:57:18,211 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-0 {event_type=RS_FLUSH_REGIONS, pid=50}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1734371821088.7212a7dec92fa5781081695b56d809ad. 
2024-12-16T17:57:18,212 INFO [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-0 {event_type=RS_FLUSH_REGIONS, pid=50}] regionserver.HRegion(2837): Flushing 7212a7dec92fa5781081695b56d809ad 3/3 column families, dataSize=134.18 KB heapSize=352.31 KB 2024-12-16T17:57:18,212 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-0 {event_type=RS_FLUSH_REGIONS, pid=50}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 7212a7dec92fa5781081695b56d809ad, store=A 2024-12-16T17:57:18,212 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-0 {event_type=RS_FLUSH_REGIONS, pid=50}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-16T17:57:18,212 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-0 {event_type=RS_FLUSH_REGIONS, pid=50}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 7212a7dec92fa5781081695b56d809ad, store=B 2024-12-16T17:57:18,212 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-0 {event_type=RS_FLUSH_REGIONS, pid=50}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-16T17:57:18,212 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-0 {event_type=RS_FLUSH_REGIONS, pid=50}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 7212a7dec92fa5781081695b56d809ad, store=C 2024-12-16T17:57:18,212 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-0 {event_type=RS_FLUSH_REGIONS, pid=50}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-16T17:57:18,221 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-0 {event_type=RS_FLUSH_REGIONS, pid=50}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241216895dff39e0fe49358df80ed03e7fe90a_7212a7dec92fa5781081695b56d809ad is 50, key is test_row_0/A:col10/1734371836819/Put/seqid=0 2024-12-16T17:57:18,230 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41817 is added to blk_1073742038_1214 (size=12454) 2024-12-16T17:57:18,231 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-0 {event_type=RS_FLUSH_REGIONS, pid=50}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-16T17:57:18,236 INFO [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-0 {event_type=RS_FLUSH_REGIONS, pid=50}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241216895dff39e0fe49358df80ed03e7fe90a_7212a7dec92fa5781081695b56d809ad to hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241216895dff39e0fe49358df80ed03e7fe90a_7212a7dec92fa5781081695b56d809ad 2024-12-16T17:57:18,237 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-0 {event_type=RS_FLUSH_REGIONS, pid=50}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/7212a7dec92fa5781081695b56d809ad/.tmp/A/1055fd24838e4cbe92110aa003f8d07b, store: [table=TestAcidGuarantees family=A region=7212a7dec92fa5781081695b56d809ad] 2024-12-16T17:57:18,238 DEBUG 
[RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-0 {event_type=RS_FLUSH_REGIONS, pid=50}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/7212a7dec92fa5781081695b56d809ad/.tmp/A/1055fd24838e4cbe92110aa003f8d07b is 175, key is test_row_0/A:col10/1734371836819/Put/seqid=0 2024-12-16T17:57:18,242 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41817 is added to blk_1073742039_1215 (size=31255) 2024-12-16T17:57:18,531 INFO [RS:0;3609ad07831c:39733-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 7212a7dec92fa5781081695b56d809ad#A#compaction#178 average throughput is 0.06 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-12-16T17:57:18,532 DEBUG [RS:0;3609ad07831c:39733-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/7212a7dec92fa5781081695b56d809ad/.tmp/A/028e1fc0ff264d78b92019720337eda2 is 175, key is test_row_0/A:col10/1734371835689/Put/seqid=0 2024-12-16T17:57:18,536 DEBUG [RS:0;3609ad07831c:39733-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/7212a7dec92fa5781081695b56d809ad/.tmp/B/27377419172649f9b2ee205fef33d10e as hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/7212a7dec92fa5781081695b56d809ad/B/27377419172649f9b2ee205fef33d10e 2024-12-16T17:57:18,547 INFO [RS:0;3609ad07831c:39733-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 4 (all) file(s) in 7212a7dec92fa5781081695b56d809ad/B of 7212a7dec92fa5781081695b56d809ad into 27377419172649f9b2ee205fef33d10e(size=12.4 K), total size for store is 12.4 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-12-16T17:57:18,547 DEBUG [RS:0;3609ad07831c:39733-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 7212a7dec92fa5781081695b56d809ad: 2024-12-16T17:57:18,547 INFO [RS:0;3609ad07831c:39733-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1734371821088.7212a7dec92fa5781081695b56d809ad., storeName=7212a7dec92fa5781081695b56d809ad/B, priority=12, startTime=1734371838102; duration=0sec 2024-12-16T17:57:18,547 DEBUG [RS:0;3609ad07831c:39733-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-16T17:57:18,547 DEBUG [RS:0;3609ad07831c:39733-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 7212a7dec92fa5781081695b56d809ad:B 2024-12-16T17:57:18,548 DEBUG [RS:0;3609ad07831c:39733-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 4 store files, 0 compacting, 4 eligible, 16 blocking 2024-12-16T17:57:18,550 DEBUG [RS:0;3609ad07831c:39733-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 4 files of size 49048 starting at candidate #0 after considering 3 permutations with 3 in ratio 2024-12-16T17:57:18,550 DEBUG [RS:0;3609ad07831c:39733-longCompactions-0 {}] regionserver.HStore(1540): 7212a7dec92fa5781081695b56d809ad/C is initiating minor compaction (all files) 2024-12-16T17:57:18,550 INFO [RS:0;3609ad07831c:39733-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 7212a7dec92fa5781081695b56d809ad/C in TestAcidGuarantees,,1734371821088.7212a7dec92fa5781081695b56d809ad. 2024-12-16T17:57:18,550 INFO [RS:0;3609ad07831c:39733-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/7212a7dec92fa5781081695b56d809ad/C/00c74d89c4934f4080f19e1b4e9aaa43, hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/7212a7dec92fa5781081695b56d809ad/C/d1668af17bc541ce940705dd2818bd0b, hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/7212a7dec92fa5781081695b56d809ad/C/861341f7f9f44e28abdb8a5628bb5e89, hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/7212a7dec92fa5781081695b56d809ad/C/652d941856074f2fbcc3a31ef89f1cdf] into tmpdir=hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/7212a7dec92fa5781081695b56d809ad/.tmp, totalSize=47.9 K 2024-12-16T17:57:18,551 DEBUG [RS:0;3609ad07831c:39733-longCompactions-0 {}] compactions.Compactor(224): Compacting 00c74d89c4934f4080f19e1b4e9aaa43, keycount=150, bloomtype=ROW, size=12.3 K, encoding=NONE, compression=NONE, seqNum=201, earliestPutTs=1734371833176 2024-12-16T17:57:18,552 DEBUG [RS:0;3609ad07831c:39733-longCompactions-0 {}] compactions.Compactor(224): Compacting d1668af17bc541ce940705dd2818bd0b, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=213, earliestPutTs=1734371834334 2024-12-16T17:57:18,552 DEBUG [RS:0;3609ad07831c:39733-longCompactions-0 {}] compactions.Compactor(224): Compacting 861341f7f9f44e28abdb8a5628bb5e89, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, 
compression=NONE, seqNum=240, earliestPutTs=1734371834553 2024-12-16T17:57:18,553 DEBUG [RS:0;3609ad07831c:39733-longCompactions-0 {}] compactions.Compactor(224): Compacting 652d941856074f2fbcc3a31ef89f1cdf, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=253, earliestPutTs=1734371835688 2024-12-16T17:57:18,569 INFO [RS:0;3609ad07831c:39733-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 7212a7dec92fa5781081695b56d809ad#C#compaction#180 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-12-16T17:57:18,570 DEBUG [RS:0;3609ad07831c:39733-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/7212a7dec92fa5781081695b56d809ad/.tmp/C/c929de87769e4ad392ffb2e31a1b6102 is 50, key is test_row_0/C:col10/1734371835689/Put/seqid=0 2024-12-16T17:57:18,571 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41817 is added to blk_1073742040_1216 (size=31685) 2024-12-16T17:57:18,583 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41817 is added to blk_1073742041_1217 (size=12731) 2024-12-16T17:57:18,589 DEBUG [RS:0;3609ad07831c:39733-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/7212a7dec92fa5781081695b56d809ad/.tmp/C/c929de87769e4ad392ffb2e31a1b6102 as hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/7212a7dec92fa5781081695b56d809ad/C/c929de87769e4ad392ffb2e31a1b6102 2024-12-16T17:57:18,596 INFO [RS:0;3609ad07831c:39733-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 4 (all) file(s) in 7212a7dec92fa5781081695b56d809ad/C of 7212a7dec92fa5781081695b56d809ad into c929de87769e4ad392ffb2e31a1b6102(size=12.4 K), total size for store is 12.4 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-12-16T17:57:18,596 DEBUG [RS:0;3609ad07831c:39733-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 7212a7dec92fa5781081695b56d809ad: 2024-12-16T17:57:18,596 INFO [RS:0;3609ad07831c:39733-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1734371821088.7212a7dec92fa5781081695b56d809ad., storeName=7212a7dec92fa5781081695b56d809ad/C, priority=12, startTime=1734371838102; duration=0sec 2024-12-16T17:57:18,596 DEBUG [RS:0;3609ad07831c:39733-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-16T17:57:18,596 DEBUG [RS:0;3609ad07831c:39733-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 7212a7dec92fa5781081695b56d809ad:C 2024-12-16T17:57:18,643 INFO [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-0 {event_type=RS_FLUSH_REGIONS, pid=50}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=276, memsize=44.7 K, hasBloomFilter=true, into tmp file hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/7212a7dec92fa5781081695b56d809ad/.tmp/A/1055fd24838e4cbe92110aa003f8d07b 2024-12-16T17:57:18,651 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-0 {event_type=RS_FLUSH_REGIONS, pid=50}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/7212a7dec92fa5781081695b56d809ad/.tmp/B/d20a5db0fe0b43ecbf20da1f3ab4f07a is 50, key is test_row_0/B:col10/1734371836819/Put/seqid=0 2024-12-16T17:57:18,654 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41817 is added to blk_1073742042_1218 (size=12301) 2024-12-16T17:57:18,655 INFO [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-0 {event_type=RS_FLUSH_REGIONS, pid=50}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=44.73 KB at sequenceid=276 (bloomFilter=true), to=hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/7212a7dec92fa5781081695b56d809ad/.tmp/B/d20a5db0fe0b43ecbf20da1f3ab4f07a 2024-12-16T17:57:18,669 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-0 {event_type=RS_FLUSH_REGIONS, pid=50}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/7212a7dec92fa5781081695b56d809ad/.tmp/C/b3037d6f8597426fba3b5b5e771880ed is 50, key is test_row_0/C:col10/1734371836819/Put/seqid=0 2024-12-16T17:57:18,673 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41817 is added to blk_1073742043_1219 (size=12301) 2024-12-16T17:57:18,936 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38367 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=49 2024-12-16T17:57:18,944 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1734371821088.7212a7dec92fa5781081695b56d809ad. 
as already flushing 2024-12-16T17:57:18,944 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39733 {}] regionserver.HRegion(8581): Flush requested on 7212a7dec92fa5781081695b56d809ad 2024-12-16T17:57:18,958 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39733 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7212a7dec92fa5781081695b56d809ad, server=3609ad07831c,39733,1734371789085 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-16T17:57:18,959 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39733 {}] ipc.CallRunner(138): callId: 138 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41652 deadline: 1734371898956, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7212a7dec92fa5781081695b56d809ad, server=3609ad07831c,39733,1734371789085 2024-12-16T17:57:18,959 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39733 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7212a7dec92fa5781081695b56d809ad, server=3609ad07831c,39733,1734371789085 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-16T17:57:18,959 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39733 {}] ipc.CallRunner(138): callId: 145 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41628 deadline: 1734371898956, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7212a7dec92fa5781081695b56d809ad, server=3609ad07831c,39733,1734371789085 2024-12-16T17:57:18,959 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39733 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7212a7dec92fa5781081695b56d809ad, server=3609ad07831c,39733,1734371789085 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-16T17:57:18,959 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39733 {}] ipc.CallRunner(138): callId: 146 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41586 deadline: 1734371898956, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7212a7dec92fa5781081695b56d809ad, server=3609ad07831c,39733,1734371789085 2024-12-16T17:57:18,959 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39733 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7212a7dec92fa5781081695b56d809ad, server=3609ad07831c,39733,1734371789085 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-16T17:57:18,959 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39733 {}] ipc.CallRunner(138): callId: 146 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41612 deadline: 1734371898957, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7212a7dec92fa5781081695b56d809ad, server=3609ad07831c,39733,1734371789085 2024-12-16T17:57:18,960 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39733 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7212a7dec92fa5781081695b56d809ad, server=3609ad07831c,39733,1734371789085 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-16T17:57:18,960 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39733 {}] ipc.CallRunner(138): callId: 143 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41642 deadline: 1734371898957, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7212a7dec92fa5781081695b56d809ad, server=3609ad07831c,39733,1734371789085 2024-12-16T17:57:18,983 DEBUG [RS:0;3609ad07831c:39733-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/7212a7dec92fa5781081695b56d809ad/.tmp/A/028e1fc0ff264d78b92019720337eda2 as hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/7212a7dec92fa5781081695b56d809ad/A/028e1fc0ff264d78b92019720337eda2 2024-12-16T17:57:18,990 INFO [RS:0;3609ad07831c:39733-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 4 (all) file(s) in 7212a7dec92fa5781081695b56d809ad/A of 7212a7dec92fa5781081695b56d809ad into 028e1fc0ff264d78b92019720337eda2(size=30.9 K), total size for store is 30.9 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-12-16T17:57:18,990 DEBUG [RS:0;3609ad07831c:39733-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 7212a7dec92fa5781081695b56d809ad: 2024-12-16T17:57:18,990 INFO [RS:0;3609ad07831c:39733-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1734371821088.7212a7dec92fa5781081695b56d809ad., storeName=7212a7dec92fa5781081695b56d809ad/A, priority=12, startTime=1734371838102; duration=0sec 2024-12-16T17:57:18,990 DEBUG [RS:0;3609ad07831c:39733-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-16T17:57:18,990 DEBUG [RS:0;3609ad07831c:39733-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 7212a7dec92fa5781081695b56d809ad:A 2024-12-16T17:57:19,061 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39733 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7212a7dec92fa5781081695b56d809ad, server=3609ad07831c,39733,1734371789085 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-16T17:57:19,061 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39733 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7212a7dec92fa5781081695b56d809ad, server=3609ad07831c,39733,1734371789085 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-16T17:57:19,061 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39733 {}] ipc.CallRunner(138): callId: 148 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41586 deadline: 1734371899060, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7212a7dec92fa5781081695b56d809ad, server=3609ad07831c,39733,1734371789085 2024-12-16T17:57:19,061 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39733 {}] ipc.CallRunner(138): callId: 147 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41628 deadline: 1734371899060, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7212a7dec92fa5781081695b56d809ad, server=3609ad07831c,39733,1734371789085 2024-12-16T17:57:19,061 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39733 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7212a7dec92fa5781081695b56d809ad, server=3609ad07831c,39733,1734371789085 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-16T17:57:19,061 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39733 {}] ipc.CallRunner(138): callId: 140 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41652 deadline: 1734371899061, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7212a7dec92fa5781081695b56d809ad, server=3609ad07831c,39733,1734371789085 2024-12-16T17:57:19,062 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39733 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7212a7dec92fa5781081695b56d809ad, server=3609ad07831c,39733,1734371789085 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-16T17:57:19,062 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39733 {}] ipc.CallRunner(138): callId: 148 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41612 deadline: 1734371899061, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7212a7dec92fa5781081695b56d809ad, server=3609ad07831c,39733,1734371789085 2024-12-16T17:57:19,062 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39733 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7212a7dec92fa5781081695b56d809ad, server=3609ad07831c,39733,1734371789085 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-16T17:57:19,062 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39733 {}] ipc.CallRunner(138): callId: 145 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41642 deadline: 1734371899061, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7212a7dec92fa5781081695b56d809ad, server=3609ad07831c,39733,1734371789085 2024-12-16T17:57:19,074 INFO [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-0 {event_type=RS_FLUSH_REGIONS, pid=50}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=44.73 KB at sequenceid=276 (bloomFilter=true), to=hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/7212a7dec92fa5781081695b56d809ad/.tmp/C/b3037d6f8597426fba3b5b5e771880ed 2024-12-16T17:57:19,089 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-0 {event_type=RS_FLUSH_REGIONS, pid=50}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/7212a7dec92fa5781081695b56d809ad/.tmp/A/1055fd24838e4cbe92110aa003f8d07b as hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/7212a7dec92fa5781081695b56d809ad/A/1055fd24838e4cbe92110aa003f8d07b 2024-12-16T17:57:19,098 INFO [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-0 {event_type=RS_FLUSH_REGIONS, pid=50}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/7212a7dec92fa5781081695b56d809ad/A/1055fd24838e4cbe92110aa003f8d07b, entries=150, sequenceid=276, filesize=30.5 K 2024-12-16T17:57:19,099 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-0 {event_type=RS_FLUSH_REGIONS, pid=50}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/7212a7dec92fa5781081695b56d809ad/.tmp/B/d20a5db0fe0b43ecbf20da1f3ab4f07a as 
hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/7212a7dec92fa5781081695b56d809ad/B/d20a5db0fe0b43ecbf20da1f3ab4f07a 2024-12-16T17:57:19,106 INFO [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-0 {event_type=RS_FLUSH_REGIONS, pid=50}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/7212a7dec92fa5781081695b56d809ad/B/d20a5db0fe0b43ecbf20da1f3ab4f07a, entries=150, sequenceid=276, filesize=12.0 K 2024-12-16T17:57:19,108 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-0 {event_type=RS_FLUSH_REGIONS, pid=50}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/7212a7dec92fa5781081695b56d809ad/.tmp/C/b3037d6f8597426fba3b5b5e771880ed as hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/7212a7dec92fa5781081695b56d809ad/C/b3037d6f8597426fba3b5b5e771880ed 2024-12-16T17:57:19,119 INFO [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-0 {event_type=RS_FLUSH_REGIONS, pid=50}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/7212a7dec92fa5781081695b56d809ad/C/b3037d6f8597426fba3b5b5e771880ed, entries=150, sequenceid=276, filesize=12.0 K 2024-12-16T17:57:19,122 INFO [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-0 {event_type=RS_FLUSH_REGIONS, pid=50}] regionserver.HRegion(3040): Finished flush of dataSize ~134.18 KB/137400, heapSize ~352.27 KB/360720, currentSize=67.09 KB/68700 for 7212a7dec92fa5781081695b56d809ad in 910ms, sequenceid=276, compaction requested=false 2024-12-16T17:57:19,122 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-0 {event_type=RS_FLUSH_REGIONS, pid=50}] regionserver.HRegion(2538): Flush status journal for 7212a7dec92fa5781081695b56d809ad: 2024-12-16T17:57:19,122 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-0 {event_type=RS_FLUSH_REGIONS, pid=50}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1734371821088.7212a7dec92fa5781081695b56d809ad. 
2024-12-16T17:57:19,122 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-0 {event_type=RS_FLUSH_REGIONS, pid=50}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=50 2024-12-16T17:57:19,122 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38367 {}] master.HMaster(4106): Remote procedure done, pid=50 2024-12-16T17:57:19,125 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=50, resume processing ppid=49 2024-12-16T17:57:19,125 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=50, ppid=49, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 2.2900 sec 2024-12-16T17:57:19,127 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=49, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=49, table=TestAcidGuarantees in 2.2940 sec 2024-12-16T17:57:19,266 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39733 {}] regionserver.HRegion(8581): Flush requested on 7212a7dec92fa5781081695b56d809ad 2024-12-16T17:57:19,266 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 7212a7dec92fa5781081695b56d809ad 3/3 column families, dataSize=73.80 KB heapSize=194.11 KB 2024-12-16T17:57:19,267 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 7212a7dec92fa5781081695b56d809ad, store=A 2024-12-16T17:57:19,267 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-16T17:57:19,267 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 7212a7dec92fa5781081695b56d809ad, store=B 2024-12-16T17:57:19,267 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-16T17:57:19,267 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 7212a7dec92fa5781081695b56d809ad, store=C 2024-12-16T17:57:19,267 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-16T17:57:19,274 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e2024121656d4ff57e0664d229452714000dabfcc_7212a7dec92fa5781081695b56d809ad is 50, key is test_row_0/A:col10/1734371838955/Put/seqid=0 2024-12-16T17:57:19,282 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41817 is added to blk_1073742044_1220 (size=12454) 2024-12-16T17:57:19,288 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39733 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7212a7dec92fa5781081695b56d809ad, server=3609ad07831c,39733,1734371789085 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-16T17:57:19,288 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39733 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7212a7dec92fa5781081695b56d809ad, server=3609ad07831c,39733,1734371789085 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-16T17:57:19,289 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39733 {}] ipc.CallRunner(138): callId: 154 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41586 deadline: 1734371899284, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7212a7dec92fa5781081695b56d809ad, server=3609ad07831c,39733,1734371789085 2024-12-16T17:57:19,289 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39733 {}] ipc.CallRunner(138): callId: 154 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41612 deadline: 1734371899284, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7212a7dec92fa5781081695b56d809ad, server=3609ad07831c,39733,1734371789085 2024-12-16T17:57:19,289 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39733 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7212a7dec92fa5781081695b56d809ad, server=3609ad07831c,39733,1734371789085 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-16T17:57:19,289 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39733 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7212a7dec92fa5781081695b56d809ad, server=3609ad07831c,39733,1734371789085 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-16T17:57:19,289 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39733 {}] ipc.CallRunner(138): callId: 153 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41628 deadline: 1734371899285, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7212a7dec92fa5781081695b56d809ad, server=3609ad07831c,39733,1734371789085 2024-12-16T17:57:19,289 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39733 {}] ipc.CallRunner(138): callId: 146 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41652 deadline: 1734371899286, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7212a7dec92fa5781081695b56d809ad, server=3609ad07831c,39733,1734371789085 2024-12-16T17:57:19,290 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39733 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7212a7dec92fa5781081695b56d809ad, server=3609ad07831c,39733,1734371789085 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-16T17:57:19,291 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39733 {}] ipc.CallRunner(138): callId: 151 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41642 deadline: 1734371899287, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7212a7dec92fa5781081695b56d809ad, server=3609ad07831c,39733,1734371789085 2024-12-16T17:57:19,390 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39733 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7212a7dec92fa5781081695b56d809ad, server=3609ad07831c,39733,1734371789085 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-16T17:57:19,391 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39733 {}] ipc.CallRunner(138): callId: 156 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41586 deadline: 1734371899390, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7212a7dec92fa5781081695b56d809ad, server=3609ad07831c,39733,1734371789085 2024-12-16T17:57:19,391 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39733 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7212a7dec92fa5781081695b56d809ad, server=3609ad07831c,39733,1734371789085 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-16T17:57:19,391 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39733 {}] ipc.CallRunner(138): callId: 156 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41612 deadline: 1734371899390, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7212a7dec92fa5781081695b56d809ad, server=3609ad07831c,39733,1734371789085 2024-12-16T17:57:19,391 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39733 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7212a7dec92fa5781081695b56d809ad, server=3609ad07831c,39733,1734371789085 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-16T17:57:19,391 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39733 {}] ipc.CallRunner(138): callId: 148 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41652 deadline: 1734371899390, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7212a7dec92fa5781081695b56d809ad, server=3609ad07831c,39733,1734371789085 2024-12-16T17:57:19,391 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39733 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7212a7dec92fa5781081695b56d809ad, server=3609ad07831c,39733,1734371789085 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-16T17:57:19,391 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39733 {}] ipc.CallRunner(138): callId: 155 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41628 deadline: 1734371899390, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7212a7dec92fa5781081695b56d809ad, server=3609ad07831c,39733,1734371789085 2024-12-16T17:57:19,394 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39733 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7212a7dec92fa5781081695b56d809ad, server=3609ad07831c,39733,1734371789085 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-16T17:57:19,394 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39733 {}] ipc.CallRunner(138): callId: 153 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41642 deadline: 1734371899391, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7212a7dec92fa5781081695b56d809ad, server=3609ad07831c,39733,1734371789085 2024-12-16T17:57:19,593 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39733 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7212a7dec92fa5781081695b56d809ad, server=3609ad07831c,39733,1734371789085 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-16T17:57:19,593 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39733 {}] ipc.CallRunner(138): callId: 157 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41628 deadline: 1734371899592, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7212a7dec92fa5781081695b56d809ad, server=3609ad07831c,39733,1734371789085 2024-12-16T17:57:19,593 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39733 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7212a7dec92fa5781081695b56d809ad, server=3609ad07831c,39733,1734371789085 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-16T17:57:19,594 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39733 {}] ipc.CallRunner(138): callId: 158 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41586 deadline: 1734371899592, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7212a7dec92fa5781081695b56d809ad, server=3609ad07831c,39733,1734371789085 2024-12-16T17:57:19,594 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39733 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7212a7dec92fa5781081695b56d809ad, server=3609ad07831c,39733,1734371789085 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-16T17:57:19,594 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39733 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7212a7dec92fa5781081695b56d809ad, server=3609ad07831c,39733,1734371789085 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-16T17:57:19,594 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39733 {}] ipc.CallRunner(138): callId: 158 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41612 deadline: 1734371899592, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7212a7dec92fa5781081695b56d809ad, server=3609ad07831c,39733,1734371789085 2024-12-16T17:57:19,594 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39733 {}] ipc.CallRunner(138): callId: 150 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41652 deadline: 1734371899593, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7212a7dec92fa5781081695b56d809ad, server=3609ad07831c,39733,1734371789085 2024-12-16T17:57:19,595 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39733 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7212a7dec92fa5781081695b56d809ad, server=3609ad07831c,39733,1734371789085 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-16T17:57:19,595 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39733 {}] ipc.CallRunner(138): callId: 155 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41642 deadline: 1734371899595, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7212a7dec92fa5781081695b56d809ad, server=3609ad07831c,39733,1734371789085 2024-12-16T17:57:19,689 DEBUG [MemStoreFlusher.0 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-16T17:57:19,698 INFO [MemStoreFlusher.0 {}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e2024121656d4ff57e0664d229452714000dabfcc_7212a7dec92fa5781081695b56d809ad to hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e2024121656d4ff57e0664d229452714000dabfcc_7212a7dec92fa5781081695b56d809ad 2024-12-16T17:57:19,699 DEBUG [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/7212a7dec92fa5781081695b56d809ad/.tmp/A/2b694b49804a48d78f0794a877afaf27, store: [table=TestAcidGuarantees family=A region=7212a7dec92fa5781081695b56d809ad] 2024-12-16T17:57:19,699 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/7212a7dec92fa5781081695b56d809ad/.tmp/A/2b694b49804a48d78f0794a877afaf27 is 175, key is test_row_0/A:col10/1734371838955/Put/seqid=0 2024-12-16T17:57:19,712 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41817 is added to blk_1073742045_1221 (size=31255) 2024-12-16T17:57:19,714 INFO [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=295, 
memsize=29.1 K, hasBloomFilter=true, into tmp file hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/7212a7dec92fa5781081695b56d809ad/.tmp/A/2b694b49804a48d78f0794a877afaf27 2024-12-16T17:57:19,721 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/7212a7dec92fa5781081695b56d809ad/.tmp/B/e195598ac6a54bc881d7512aa6e98405 is 50, key is test_row_0/B:col10/1734371838955/Put/seqid=0 2024-12-16T17:57:19,729 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41817 is added to blk_1073742046_1222 (size=12301) 2024-12-16T17:57:19,895 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39733 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7212a7dec92fa5781081695b56d809ad, server=3609ad07831c,39733,1734371789085 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-16T17:57:19,895 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39733 {}] ipc.CallRunner(138): callId: 160 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41612 deadline: 1734371899894, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7212a7dec92fa5781081695b56d809ad, server=3609ad07831c,39733,1734371789085 2024-12-16T17:57:19,896 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39733 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7212a7dec92fa5781081695b56d809ad, server=3609ad07831c,39733,1734371789085 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-16T17:57:19,897 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39733 {}] ipc.CallRunner(138): callId: 159 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41628 deadline: 1734371899895, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7212a7dec92fa5781081695b56d809ad, server=3609ad07831c,39733,1734371789085 2024-12-16T17:57:19,897 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39733 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7212a7dec92fa5781081695b56d809ad, server=3609ad07831c,39733,1734371789085 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-16T17:57:19,897 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39733 {}] ipc.CallRunner(138): callId: 160 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41586 deadline: 1734371899896, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7212a7dec92fa5781081695b56d809ad, server=3609ad07831c,39733,1734371789085 2024-12-16T17:57:19,897 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39733 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7212a7dec92fa5781081695b56d809ad, server=3609ad07831c,39733,1734371789085 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-16T17:57:19,897 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39733 {}] ipc.CallRunner(138): callId: 152 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41652 deadline: 1734371899896, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7212a7dec92fa5781081695b56d809ad, server=3609ad07831c,39733,1734371789085 2024-12-16T17:57:19,898 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39733 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7212a7dec92fa5781081695b56d809ad, server=3609ad07831c,39733,1734371789085 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-16T17:57:19,898 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39733 {}] ipc.CallRunner(138): callId: 157 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41642 deadline: 1734371899897, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7212a7dec92fa5781081695b56d809ad, server=3609ad07831c,39733,1734371789085 2024-12-16T17:57:20,130 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=29.07 KB at sequenceid=295 (bloomFilter=true), to=hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/7212a7dec92fa5781081695b56d809ad/.tmp/B/e195598ac6a54bc881d7512aa6e98405 2024-12-16T17:57:20,137 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/7212a7dec92fa5781081695b56d809ad/.tmp/C/ebb81563988a43939276f33d1b25559e is 50, key is test_row_0/C:col10/1734371838955/Put/seqid=0 2024-12-16T17:57:20,142 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41817 is added to blk_1073742047_1223 (size=12301) 2024-12-16T17:57:20,399 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39733 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7212a7dec92fa5781081695b56d809ad, server=3609ad07831c,39733,1734371789085 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-16T17:57:20,399 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39733 {}] ipc.CallRunner(138): callId: 161 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41628 deadline: 1734371900397, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7212a7dec92fa5781081695b56d809ad, server=3609ad07831c,39733,1734371789085 2024-12-16T17:57:20,399 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39733 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7212a7dec92fa5781081695b56d809ad, server=3609ad07831c,39733,1734371789085 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-16T17:57:20,399 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39733 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7212a7dec92fa5781081695b56d809ad, server=3609ad07831c,39733,1734371789085 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-16T17:57:20,399 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39733 {}] ipc.CallRunner(138): callId: 162 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41586 deadline: 1734371900398, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7212a7dec92fa5781081695b56d809ad, server=3609ad07831c,39733,1734371789085 2024-12-16T17:57:20,400 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39733 {}] ipc.CallRunner(138): callId: 162 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41612 deadline: 1734371900398, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7212a7dec92fa5781081695b56d809ad, server=3609ad07831c,39733,1734371789085 2024-12-16T17:57:20,401 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39733 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7212a7dec92fa5781081695b56d809ad, server=3609ad07831c,39733,1734371789085 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-16T17:57:20,402 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39733 {}] ipc.CallRunner(138): callId: 159 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41642 deadline: 1734371900400, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7212a7dec92fa5781081695b56d809ad, server=3609ad07831c,39733,1734371789085 2024-12-16T17:57:20,404 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39733 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7212a7dec92fa5781081695b56d809ad, server=3609ad07831c,39733,1734371789085 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-16T17:57:20,404 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39733 {}] ipc.CallRunner(138): callId: 154 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41652 deadline: 1734371900402, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7212a7dec92fa5781081695b56d809ad, server=3609ad07831c,39733,1734371789085 2024-12-16T17:57:20,543 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=29.07 KB at sequenceid=295 (bloomFilter=true), to=hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/7212a7dec92fa5781081695b56d809ad/.tmp/C/ebb81563988a43939276f33d1b25559e 2024-12-16T17:57:20,548 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/7212a7dec92fa5781081695b56d809ad/.tmp/A/2b694b49804a48d78f0794a877afaf27 as hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/7212a7dec92fa5781081695b56d809ad/A/2b694b49804a48d78f0794a877afaf27 2024-12-16T17:57:20,552 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/7212a7dec92fa5781081695b56d809ad/A/2b694b49804a48d78f0794a877afaf27, entries=150, sequenceid=295, filesize=30.5 K 2024-12-16T17:57:20,553 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/7212a7dec92fa5781081695b56d809ad/.tmp/B/e195598ac6a54bc881d7512aa6e98405 as hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/7212a7dec92fa5781081695b56d809ad/B/e195598ac6a54bc881d7512aa6e98405 2024-12-16T17:57:20,558 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added 
hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/7212a7dec92fa5781081695b56d809ad/B/e195598ac6a54bc881d7512aa6e98405, entries=150, sequenceid=295, filesize=12.0 K 2024-12-16T17:57:20,559 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/7212a7dec92fa5781081695b56d809ad/.tmp/C/ebb81563988a43939276f33d1b25559e as hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/7212a7dec92fa5781081695b56d809ad/C/ebb81563988a43939276f33d1b25559e 2024-12-16T17:57:20,564 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/7212a7dec92fa5781081695b56d809ad/C/ebb81563988a43939276f33d1b25559e, entries=150, sequenceid=295, filesize=12.0 K 2024-12-16T17:57:20,565 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~87.22 KB/89310, heapSize ~229.22 KB/234720, currentSize=114.05 KB/116790 for 7212a7dec92fa5781081695b56d809ad in 1299ms, sequenceid=295, compaction requested=true 2024-12-16T17:57:20,565 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 7212a7dec92fa5781081695b56d809ad: 2024-12-16T17:57:20,565 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 7212a7dec92fa5781081695b56d809ad:A, priority=-2147483648, current under compaction store size is 1 2024-12-16T17:57:20,565 DEBUG [RS:0;3609ad07831c:39733-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-16T17:57:20,565 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-16T17:57:20,565 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 7212a7dec92fa5781081695b56d809ad:B, priority=-2147483648, current under compaction store size is 2 2024-12-16T17:57:20,565 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-16T17:57:20,565 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 7212a7dec92fa5781081695b56d809ad:C, priority=-2147483648, current under compaction store size is 3 2024-12-16T17:57:20,565 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=2), splitQueue=0 2024-12-16T17:57:20,566 DEBUG [RS:0;3609ad07831c:39733-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-16T17:57:20,568 DEBUG [RS:0;3609ad07831c:39733-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 94195 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-16T17:57:20,568 DEBUG [RS:0;3609ad07831c:39733-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 
files of size 37333 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-16T17:57:20,568 DEBUG [RS:0;3609ad07831c:39733-shortCompactions-0 {}] regionserver.HStore(1540): 7212a7dec92fa5781081695b56d809ad/A is initiating minor compaction (all files) 2024-12-16T17:57:20,568 DEBUG [RS:0;3609ad07831c:39733-longCompactions-0 {}] regionserver.HStore(1540): 7212a7dec92fa5781081695b56d809ad/B is initiating minor compaction (all files) 2024-12-16T17:57:20,568 INFO [RS:0;3609ad07831c:39733-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 7212a7dec92fa5781081695b56d809ad/A in TestAcidGuarantees,,1734371821088.7212a7dec92fa5781081695b56d809ad. 2024-12-16T17:57:20,568 INFO [RS:0;3609ad07831c:39733-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 7212a7dec92fa5781081695b56d809ad/B in TestAcidGuarantees,,1734371821088.7212a7dec92fa5781081695b56d809ad. 2024-12-16T17:57:20,568 INFO [RS:0;3609ad07831c:39733-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/7212a7dec92fa5781081695b56d809ad/B/27377419172649f9b2ee205fef33d10e, hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/7212a7dec92fa5781081695b56d809ad/B/d20a5db0fe0b43ecbf20da1f3ab4f07a, hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/7212a7dec92fa5781081695b56d809ad/B/e195598ac6a54bc881d7512aa6e98405] into tmpdir=hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/7212a7dec92fa5781081695b56d809ad/.tmp, totalSize=36.5 K 2024-12-16T17:57:20,568 INFO [RS:0;3609ad07831c:39733-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/7212a7dec92fa5781081695b56d809ad/A/028e1fc0ff264d78b92019720337eda2, hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/7212a7dec92fa5781081695b56d809ad/A/1055fd24838e4cbe92110aa003f8d07b, hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/7212a7dec92fa5781081695b56d809ad/A/2b694b49804a48d78f0794a877afaf27] into tmpdir=hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/7212a7dec92fa5781081695b56d809ad/.tmp, totalSize=92.0 K 2024-12-16T17:57:20,568 INFO [RS:0;3609ad07831c:39733-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(181): MOB compaction: major=false isAll=true priority=13 throughput controller=DefaultCompactionThroughputController [maxThroughput=50.00 MB/second, activeCompactions=0] table=TestAcidGuarantees cf=A region=TestAcidGuarantees,,1734371821088.7212a7dec92fa5781081695b56d809ad. 2024-12-16T17:57:20,569 DEBUG [RS:0;3609ad07831c:39733-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(191): MOB compaction table=TestAcidGuarantees cf=A region=TestAcidGuarantees,,1734371821088.7212a7dec92fa5781081695b56d809ad. 
files: [hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/7212a7dec92fa5781081695b56d809ad/A/028e1fc0ff264d78b92019720337eda2, hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/7212a7dec92fa5781081695b56d809ad/A/1055fd24838e4cbe92110aa003f8d07b, hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/7212a7dec92fa5781081695b56d809ad/A/2b694b49804a48d78f0794a877afaf27] 2024-12-16T17:57:20,569 DEBUG [RS:0;3609ad07831c:39733-longCompactions-0 {}] compactions.Compactor(224): Compacting 27377419172649f9b2ee205fef33d10e, keycount=150, bloomtype=ROW, size=12.4 K, encoding=NONE, compression=NONE, seqNum=253, earliestPutTs=1734371835688 2024-12-16T17:57:20,569 DEBUG [RS:0;3609ad07831c:39733-shortCompactions-0 {}] compactions.Compactor(224): Compacting 028e1fc0ff264d78b92019720337eda2, keycount=150, bloomtype=ROW, size=30.9 K, encoding=NONE, compression=NONE, seqNum=253, earliestPutTs=1734371835688 2024-12-16T17:57:20,569 DEBUG [RS:0;3609ad07831c:39733-longCompactions-0 {}] compactions.Compactor(224): Compacting d20a5db0fe0b43ecbf20da1f3ab4f07a, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=276, earliestPutTs=1734371836819 2024-12-16T17:57:20,570 DEBUG [RS:0;3609ad07831c:39733-shortCompactions-0 {}] compactions.Compactor(224): Compacting 1055fd24838e4cbe92110aa003f8d07b, keycount=150, bloomtype=ROW, size=30.5 K, encoding=NONE, compression=NONE, seqNum=276, earliestPutTs=1734371836819 2024-12-16T17:57:20,570 DEBUG [RS:0;3609ad07831c:39733-shortCompactions-0 {}] compactions.Compactor(224): Compacting 2b694b49804a48d78f0794a877afaf27, keycount=150, bloomtype=ROW, size=30.5 K, encoding=NONE, compression=NONE, seqNum=295, earliestPutTs=1734371838955 2024-12-16T17:57:20,571 DEBUG [RS:0;3609ad07831c:39733-longCompactions-0 {}] compactions.Compactor(224): Compacting e195598ac6a54bc881d7512aa6e98405, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=295, earliestPutTs=1734371838955 2024-12-16T17:57:20,577 INFO [RS:0;3609ad07831c:39733-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(322): Compact MOB=false optimized configured=false optimized enabled=false maximum MOB file size=1073741824 major=true store=[table=TestAcidGuarantees family=A region=7212a7dec92fa5781081695b56d809ad] 2024-12-16T17:57:20,579 INFO [RS:0;3609ad07831c:39733-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 7212a7dec92fa5781081695b56d809ad#B#compaction#187 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 
1 active operations remaining, total limit is 50.00 MB/second 2024-12-16T17:57:20,579 DEBUG [RS:0;3609ad07831c:39733-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/7212a7dec92fa5781081695b56d809ad/.tmp/B/1e5884c4e699444f9a1263d02c2b0d48 is 50, key is test_row_0/B:col10/1734371838955/Put/seqid=0 2024-12-16T17:57:20,581 DEBUG [RS:0;3609ad07831c:39733-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(626): New MOB writer created=d41d8cd98f00b204e9800998ecf8427e20241216383d2759f3034fa7bfd9cb9110b6236f_7212a7dec92fa5781081695b56d809ad store=[table=TestAcidGuarantees family=A region=7212a7dec92fa5781081695b56d809ad] 2024-12-16T17:57:20,583 DEBUG [RS:0;3609ad07831c:39733-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(647): Commit or abort size=0 mobCells=0 major=true file=d41d8cd98f00b204e9800998ecf8427e20241216383d2759f3034fa7bfd9cb9110b6236f_7212a7dec92fa5781081695b56d809ad, store=[table=TestAcidGuarantees family=A region=7212a7dec92fa5781081695b56d809ad] 2024-12-16T17:57:20,583 DEBUG [RS:0;3609ad07831c:39733-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(658): Aborting writer for hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241216383d2759f3034fa7bfd9cb9110b6236f_7212a7dec92fa5781081695b56d809ad because there are no MOB cells, store=[table=TestAcidGuarantees family=A region=7212a7dec92fa5781081695b56d809ad] 2024-12-16T17:57:20,593 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41817 is added to blk_1073742048_1224 (size=12983) 2024-12-16T17:57:20,618 DEBUG [RS:0;3609ad07831c:39733-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/7212a7dec92fa5781081695b56d809ad/.tmp/B/1e5884c4e699444f9a1263d02c2b0d48 as hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/7212a7dec92fa5781081695b56d809ad/B/1e5884c4e699444f9a1263d02c2b0d48 2024-12-16T17:57:20,618 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41817 is added to blk_1073742049_1225 (size=4469) 2024-12-16T17:57:20,628 INFO [RS:0;3609ad07831c:39733-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 7212a7dec92fa5781081695b56d809ad/B of 7212a7dec92fa5781081695b56d809ad into 1e5884c4e699444f9a1263d02c2b0d48(size=12.7 K), total size for store is 12.7 K. This selection was in queue for 0sec, and took 0sec to execute. 
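The store-file selection logged above ("Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking") reflects the usual per-store compaction thresholds rather than anything specific to this test run. A minimal sketch of setting those knobs programmatically, assuming the stock configuration keys hbase.hstore.compactionThreshold and hbase.hstore.blockingStoreFiles (both standard HBase settings; the class name below is illustrative only):

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;

public class CompactionTuningSketch {
    public static Configuration tuned() {
        // Start from the default HBase configuration (hbase-default.xml + hbase-site.xml).
        Configuration conf = HBaseConfiguration.create();
        // Minimum number of eligible store files before a minor compaction is considered
        // (corresponds to the "3 eligible" figure in the selection log above).
        conf.setInt("hbase.hstore.compactionThreshold", 3);
        // Number of store files in one store at which further flushes are blocked
        // (corresponds to the "16 blocking" figure in the same log line).
        conf.setInt("hbase.hstore.blockingStoreFiles", 16);
        return conf;
    }
}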
2024-12-16T17:57:20,628 DEBUG [RS:0;3609ad07831c:39733-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 7212a7dec92fa5781081695b56d809ad: 2024-12-16T17:57:20,628 INFO [RS:0;3609ad07831c:39733-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1734371821088.7212a7dec92fa5781081695b56d809ad., storeName=7212a7dec92fa5781081695b56d809ad/B, priority=13, startTime=1734371840565; duration=0sec 2024-12-16T17:57:20,628 DEBUG [RS:0;3609ad07831c:39733-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-16T17:57:20,628 DEBUG [RS:0;3609ad07831c:39733-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 7212a7dec92fa5781081695b56d809ad:B 2024-12-16T17:57:20,628 DEBUG [RS:0;3609ad07831c:39733-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-16T17:57:20,630 DEBUG [RS:0;3609ad07831c:39733-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 37333 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-16T17:57:20,630 DEBUG [RS:0;3609ad07831c:39733-longCompactions-0 {}] regionserver.HStore(1540): 7212a7dec92fa5781081695b56d809ad/C is initiating minor compaction (all files) 2024-12-16T17:57:20,630 INFO [RS:0;3609ad07831c:39733-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 7212a7dec92fa5781081695b56d809ad/C in TestAcidGuarantees,,1734371821088.7212a7dec92fa5781081695b56d809ad. 2024-12-16T17:57:20,630 INFO [RS:0;3609ad07831c:39733-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/7212a7dec92fa5781081695b56d809ad/C/c929de87769e4ad392ffb2e31a1b6102, hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/7212a7dec92fa5781081695b56d809ad/C/b3037d6f8597426fba3b5b5e771880ed, hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/7212a7dec92fa5781081695b56d809ad/C/ebb81563988a43939276f33d1b25559e] into tmpdir=hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/7212a7dec92fa5781081695b56d809ad/.tmp, totalSize=36.5 K 2024-12-16T17:57:20,631 DEBUG [RS:0;3609ad07831c:39733-longCompactions-0 {}] compactions.Compactor(224): Compacting c929de87769e4ad392ffb2e31a1b6102, keycount=150, bloomtype=ROW, size=12.4 K, encoding=NONE, compression=NONE, seqNum=253, earliestPutTs=1734371835688 2024-12-16T17:57:20,631 DEBUG [RS:0;3609ad07831c:39733-longCompactions-0 {}] compactions.Compactor(224): Compacting b3037d6f8597426fba3b5b5e771880ed, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=276, earliestPutTs=1734371836819 2024-12-16T17:57:20,632 DEBUG [RS:0;3609ad07831c:39733-longCompactions-0 {}] compactions.Compactor(224): Compacting ebb81563988a43939276f33d1b25559e, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=295, earliestPutTs=1734371838955 2024-12-16T17:57:20,643 INFO [RS:0;3609ad07831c:39733-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 
7212a7dec92fa5781081695b56d809ad#C#compaction#188 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 1 active operations remaining, total limit is 50.00 MB/second 2024-12-16T17:57:20,644 DEBUG [RS:0;3609ad07831c:39733-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/7212a7dec92fa5781081695b56d809ad/.tmp/C/bae7a6d695514c2dbedcb0fc412faf2a is 50, key is test_row_0/C:col10/1734371838955/Put/seqid=0 2024-12-16T17:57:20,650 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41817 is added to blk_1073742050_1226 (size=12983) 2024-12-16T17:57:20,656 DEBUG [RS:0;3609ad07831c:39733-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/7212a7dec92fa5781081695b56d809ad/.tmp/C/bae7a6d695514c2dbedcb0fc412faf2a as hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/7212a7dec92fa5781081695b56d809ad/C/bae7a6d695514c2dbedcb0fc412faf2a 2024-12-16T17:57:20,665 INFO [RS:0;3609ad07831c:39733-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 7212a7dec92fa5781081695b56d809ad/C of 7212a7dec92fa5781081695b56d809ad into bae7a6d695514c2dbedcb0fc412faf2a(size=12.7 K), total size for store is 12.7 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-12-16T17:57:20,665 DEBUG [RS:0;3609ad07831c:39733-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 7212a7dec92fa5781081695b56d809ad: 2024-12-16T17:57:20,665 INFO [RS:0;3609ad07831c:39733-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1734371821088.7212a7dec92fa5781081695b56d809ad., storeName=7212a7dec92fa5781081695b56d809ad/C, priority=13, startTime=1734371840565; duration=0sec 2024-12-16T17:57:20,665 DEBUG [RS:0;3609ad07831c:39733-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-16T17:57:20,665 DEBUG [RS:0;3609ad07831c:39733-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 7212a7dec92fa5781081695b56d809ad:C 2024-12-16T17:57:20,937 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38367 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=49 2024-12-16T17:57:20,937 INFO [Thread-705 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 49 completed 2024-12-16T17:57:20,938 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38367 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-12-16T17:57:20,939 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38367 {}] procedure2.ProcedureExecutor(1098): Stored pid=51, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=51, table=TestAcidGuarantees 2024-12-16T17:57:20,939 INFO [PEWorker-2 {}] procedure.FlushTableProcedure(91): pid=51, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=51, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 
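The flush tracked as pid=51 above was requested by a client ("Client=jenkins//172.17.0.2 flush TestAcidGuarantees") and runs on the master as a FlushTableProcedure whose completion the caller polls. A minimal sketch of issuing the same kind of table flush from client code, assuming a standard connection setup (the setup itself is illustrative and not taken from the test):

import java.io.IOException;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

public class FlushTableSketch {
    public static void main(String[] args) throws IOException {
        Configuration conf = HBaseConfiguration.create();
        try (Connection connection = ConnectionFactory.createConnection(conf);
             Admin admin = connection.getAdmin()) {
            // Admin.flush() asks the master to flush every region of the table and
            // returns when the operation finishes, which is the
            // "Operation: FLUSH, Table Name: ... completed" pattern seen in the log.
            admin.flush(TableName.valueOf("TestAcidGuarantees"));
        }
    }
}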
2024-12-16T17:57:20,940 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38367 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=51 2024-12-16T17:57:20,940 INFO [PEWorker-2 {}] procedure.FlushTableProcedure(91): pid=51, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=51, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-12-16T17:57:20,940 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=52, ppid=51, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-12-16T17:57:21,019 INFO [RS:0;3609ad07831c:39733-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 7212a7dec92fa5781081695b56d809ad#A#compaction#186 average throughput is 0.06 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-12-16T17:57:21,020 DEBUG [RS:0;3609ad07831c:39733-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/7212a7dec92fa5781081695b56d809ad/.tmp/A/e7264d83d5c94bc68e56beda8deb1876 is 175, key is test_row_0/A:col10/1734371838955/Put/seqid=0 2024-12-16T17:57:21,026 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41817 is added to blk_1073742051_1227 (size=31937) 2024-12-16T17:57:21,040 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38367 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=51 2024-12-16T17:57:21,092 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 3609ad07831c,39733,1734371789085 2024-12-16T17:57:21,093 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=39733 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=52 2024-12-16T17:57:21,093 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-1 {event_type=RS_FLUSH_REGIONS, pid=52}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1734371821088.7212a7dec92fa5781081695b56d809ad. 
2024-12-16T17:57:21,093 INFO [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-1 {event_type=RS_FLUSH_REGIONS, pid=52}] regionserver.HRegion(2837): Flushing 7212a7dec92fa5781081695b56d809ad 3/3 column families, dataSize=114.05 KB heapSize=299.58 KB 2024-12-16T17:57:21,093 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-1 {event_type=RS_FLUSH_REGIONS, pid=52}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 7212a7dec92fa5781081695b56d809ad, store=A 2024-12-16T17:57:21,093 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-1 {event_type=RS_FLUSH_REGIONS, pid=52}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-16T17:57:21,093 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-1 {event_type=RS_FLUSH_REGIONS, pid=52}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 7212a7dec92fa5781081695b56d809ad, store=B 2024-12-16T17:57:21,093 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-1 {event_type=RS_FLUSH_REGIONS, pid=52}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-16T17:57:21,093 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-1 {event_type=RS_FLUSH_REGIONS, pid=52}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 7212a7dec92fa5781081695b56d809ad, store=C 2024-12-16T17:57:21,093 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-1 {event_type=RS_FLUSH_REGIONS, pid=52}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-16T17:57:21,099 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-1 {event_type=RS_FLUSH_REGIONS, pid=52}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241216dd85b38478ee42578a05e4b16ef19e71_7212a7dec92fa5781081695b56d809ad is 50, key is test_row_0/A:col10/1734371839284/Put/seqid=0 2024-12-16T17:57:21,103 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41817 is added to blk_1073742052_1228 (size=12454) 2024-12-16T17:57:21,241 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38367 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=51 2024-12-16T17:57:21,404 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1734371821088.7212a7dec92fa5781081695b56d809ad. as already flushing 2024-12-16T17:57:21,404 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39733 {}] regionserver.HRegion(8581): Flush requested on 7212a7dec92fa5781081695b56d809ad 2024-12-16T17:57:21,414 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39733 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7212a7dec92fa5781081695b56d809ad, server=3609ad07831c,39733,1734371789085 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-16T17:57:21,414 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39733 {}] ipc.CallRunner(138): callId: 157 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41652 deadline: 1734371901411, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7212a7dec92fa5781081695b56d809ad, server=3609ad07831c,39733,1734371789085 2024-12-16T17:57:21,414 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39733 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7212a7dec92fa5781081695b56d809ad, server=3609ad07831c,39733,1734371789085 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-16T17:57:21,414 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39733 {}] ipc.CallRunner(138): callId: 167 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41628 deadline: 1734371901411, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7212a7dec92fa5781081695b56d809ad, server=3609ad07831c,39733,1734371789085 2024-12-16T17:57:21,414 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39733 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7212a7dec92fa5781081695b56d809ad, server=3609ad07831c,39733,1734371789085 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-16T17:57:21,415 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39733 {}] ipc.CallRunner(138): callId: 164 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41642 deadline: 1734371901412, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7212a7dec92fa5781081695b56d809ad, server=3609ad07831c,39733,1734371789085 2024-12-16T17:57:21,415 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39733 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7212a7dec92fa5781081695b56d809ad, server=3609ad07831c,39733,1734371789085 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-16T17:57:21,415 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39733 {}] ipc.CallRunner(138): callId: 166 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41586 deadline: 1734371901413, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7212a7dec92fa5781081695b56d809ad, server=3609ad07831c,39733,1734371789085 2024-12-16T17:57:21,416 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39733 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7212a7dec92fa5781081695b56d809ad, server=3609ad07831c,39733,1734371789085 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-16T17:57:21,416 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39733 {}] ipc.CallRunner(138): callId: 167 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41612 deadline: 1734371901414, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7212a7dec92fa5781081695b56d809ad, server=3609ad07831c,39733,1734371789085 2024-12-16T17:57:21,431 DEBUG [RS:0;3609ad07831c:39733-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/7212a7dec92fa5781081695b56d809ad/.tmp/A/e7264d83d5c94bc68e56beda8deb1876 as hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/7212a7dec92fa5781081695b56d809ad/A/e7264d83d5c94bc68e56beda8deb1876 2024-12-16T17:57:21,435 INFO [RS:0;3609ad07831c:39733-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 7212a7dec92fa5781081695b56d809ad/A of 7212a7dec92fa5781081695b56d809ad into e7264d83d5c94bc68e56beda8deb1876(size=31.2 K), total size for store is 31.2 K. This selection was in queue for 0sec, and took 0sec to execute. 
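The repeated RegionTooBusyException warnings above are the region server pushing back on writers while the memstore sits over its blocking limit (512.0 K here) and the flush is still in flight; the standard HBase client retries such failures internally, but a caller doing its own error handling could back off explicitly. A minimal sketch of such a retry loop, assuming a Table handle for TestAcidGuarantees and purely illustrative row/column values (the real writes are generated by the test itself):

import java.io.IOException;
import org.apache.hadoop.hbase.RegionTooBusyException;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.util.Bytes;

public class BackoffPutSketch {
    // Retries a single put a few times when the region reports it is too busy
    // (memstore over its blocking limit), sleeping between attempts.
    static void putWithBackoff(Table table, Put put) throws IOException, InterruptedException {
        int attempts = 0;
        while (true) {
            try {
                table.put(put);
                return;
            } catch (RegionTooBusyException e) {
                if (++attempts >= 5) {
                    throw e; // give up after a few attempts
                }
                Thread.sleep(100L * attempts); // simple linear backoff
            }
        }
    }

    static Put examplePut() {
        // Illustrative values only, loosely modeled on the row/column names in the log.
        Put put = new Put(Bytes.toBytes("test_row_0"));
        put.addColumn(Bytes.toBytes("A"), Bytes.toBytes("col10"), Bytes.toBytes("value"));
        return put;
    }
}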
2024-12-16T17:57:21,435 DEBUG [RS:0;3609ad07831c:39733-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 7212a7dec92fa5781081695b56d809ad: 2024-12-16T17:57:21,435 INFO [RS:0;3609ad07831c:39733-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1734371821088.7212a7dec92fa5781081695b56d809ad., storeName=7212a7dec92fa5781081695b56d809ad/A, priority=13, startTime=1734371840565; duration=0sec 2024-12-16T17:57:21,435 DEBUG [RS:0;3609ad07831c:39733-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-16T17:57:21,435 DEBUG [RS:0;3609ad07831c:39733-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 7212a7dec92fa5781081695b56d809ad:A 2024-12-16T17:57:21,504 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-1 {event_type=RS_FLUSH_REGIONS, pid=52}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-16T17:57:21,508 INFO [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-1 {event_type=RS_FLUSH_REGIONS, pid=52}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241216dd85b38478ee42578a05e4b16ef19e71_7212a7dec92fa5781081695b56d809ad to hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241216dd85b38478ee42578a05e4b16ef19e71_7212a7dec92fa5781081695b56d809ad 2024-12-16T17:57:21,509 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-1 {event_type=RS_FLUSH_REGIONS, pid=52}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/7212a7dec92fa5781081695b56d809ad/.tmp/A/a5bf7b4ed79e4dd8ae3634df2a20c011, store: [table=TestAcidGuarantees family=A region=7212a7dec92fa5781081695b56d809ad] 2024-12-16T17:57:21,510 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-1 {event_type=RS_FLUSH_REGIONS, pid=52}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/7212a7dec92fa5781081695b56d809ad/.tmp/A/a5bf7b4ed79e4dd8ae3634df2a20c011 is 175, key is test_row_0/A:col10/1734371839284/Put/seqid=0 2024-12-16T17:57:21,516 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41817 is added to blk_1073742053_1229 (size=31255) 2024-12-16T17:57:21,517 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39733 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7212a7dec92fa5781081695b56d809ad, server=3609ad07831c,39733,1734371789085 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-16T17:57:21,517 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39733 {}] ipc.CallRunner(138): callId: 159 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41652 deadline: 1734371901516, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7212a7dec92fa5781081695b56d809ad, server=3609ad07831c,39733,1734371789085 2024-12-16T17:57:21,517 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39733 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7212a7dec92fa5781081695b56d809ad, server=3609ad07831c,39733,1734371789085 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-16T17:57:21,517 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39733 {}] ipc.CallRunner(138): callId: 166 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41642 deadline: 1734371901516, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7212a7dec92fa5781081695b56d809ad, server=3609ad07831c,39733,1734371789085 2024-12-16T17:57:21,518 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39733 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7212a7dec92fa5781081695b56d809ad, server=3609ad07831c,39733,1734371789085 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-16T17:57:21,518 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39733 {}] ipc.CallRunner(138): callId: 168 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41586 deadline: 1734371901516, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7212a7dec92fa5781081695b56d809ad, server=3609ad07831c,39733,1734371789085 2024-12-16T17:57:21,518 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39733 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7212a7dec92fa5781081695b56d809ad, server=3609ad07831c,39733,1734371789085 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-16T17:57:21,518 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39733 {}] ipc.CallRunner(138): callId: 169 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41628 deadline: 1734371901516, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7212a7dec92fa5781081695b56d809ad, server=3609ad07831c,39733,1734371789085 2024-12-16T17:57:21,518 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39733 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7212a7dec92fa5781081695b56d809ad, server=3609ad07831c,39733,1734371789085 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-16T17:57:21,518 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39733 {}] ipc.CallRunner(138): callId: 169 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41612 deadline: 1734371901516, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7212a7dec92fa5781081695b56d809ad, server=3609ad07831c,39733,1734371789085 2024-12-16T17:57:21,542 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38367 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=51 2024-12-16T17:57:21,719 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39733 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7212a7dec92fa5781081695b56d809ad, server=3609ad07831c,39733,1734371789085 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-16T17:57:21,719 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39733 {}] ipc.CallRunner(138): callId: 161 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41652 deadline: 1734371901718, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7212a7dec92fa5781081695b56d809ad, server=3609ad07831c,39733,1734371789085 2024-12-16T17:57:21,719 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39733 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7212a7dec92fa5781081695b56d809ad, server=3609ad07831c,39733,1734371789085 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-16T17:57:21,720 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39733 {}] ipc.CallRunner(138): callId: 168 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41642 deadline: 1734371901718, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7212a7dec92fa5781081695b56d809ad, server=3609ad07831c,39733,1734371789085 2024-12-16T17:57:21,720 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39733 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7212a7dec92fa5781081695b56d809ad, server=3609ad07831c,39733,1734371789085 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-16T17:57:21,720 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39733 {}] ipc.CallRunner(138): callId: 170 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41586 deadline: 1734371901718, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7212a7dec92fa5781081695b56d809ad, server=3609ad07831c,39733,1734371789085 2024-12-16T17:57:21,720 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39733 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7212a7dec92fa5781081695b56d809ad, server=3609ad07831c,39733,1734371789085 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-16T17:57:21,720 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39733 {}] ipc.CallRunner(138): callId: 171 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41628 deadline: 1734371901720, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7212a7dec92fa5781081695b56d809ad, server=3609ad07831c,39733,1734371789085 2024-12-16T17:57:21,721 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39733 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7212a7dec92fa5781081695b56d809ad, server=3609ad07831c,39733,1734371789085 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-16T17:57:21,721 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39733 {}] ipc.CallRunner(138): callId: 171 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41612 deadline: 1734371901720, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7212a7dec92fa5781081695b56d809ad, server=3609ad07831c,39733,1734371789085 2024-12-16T17:57:21,916 INFO [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-1 {event_type=RS_FLUSH_REGIONS, pid=52}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=317, memsize=38.0 K, hasBloomFilter=true, into tmp file hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/7212a7dec92fa5781081695b56d809ad/.tmp/A/a5bf7b4ed79e4dd8ae3634df2a20c011 2024-12-16T17:57:21,926 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-1 {event_type=RS_FLUSH_REGIONS, pid=52}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/7212a7dec92fa5781081695b56d809ad/.tmp/B/e70f420a9aae40de8dd536ac1f67b4e8 is 50, key is test_row_0/B:col10/1734371839284/Put/seqid=0 2024-12-16T17:57:21,931 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41817 is added to blk_1073742054_1230 (size=12301) 2024-12-16T17:57:21,932 INFO [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-1 {event_type=RS_FLUSH_REGIONS, pid=52}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=38.02 KB at sequenceid=317 (bloomFilter=true), to=hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/7212a7dec92fa5781081695b56d809ad/.tmp/B/e70f420a9aae40de8dd536ac1f67b4e8 2024-12-16T17:57:21,940 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-1 {event_type=RS_FLUSH_REGIONS, pid=52}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/7212a7dec92fa5781081695b56d809ad/.tmp/C/ab5f11a8d0a04df98673fed99afc69ed is 50, key is test_row_0/C:col10/1734371839284/Put/seqid=0 2024-12-16T17:57:21,943 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41817 is added to blk_1073742055_1231 (size=12301) 2024-12-16T17:57:21,944 INFO [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-1 {event_type=RS_FLUSH_REGIONS, pid=52}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=38.02 KB at sequenceid=317 (bloomFilter=true), to=hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/7212a7dec92fa5781081695b56d809ad/.tmp/C/ab5f11a8d0a04df98673fed99afc69ed 2024-12-16T17:57:21,949 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-1 {event_type=RS_FLUSH_REGIONS, pid=52}] 
regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/7212a7dec92fa5781081695b56d809ad/.tmp/A/a5bf7b4ed79e4dd8ae3634df2a20c011 as hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/7212a7dec92fa5781081695b56d809ad/A/a5bf7b4ed79e4dd8ae3634df2a20c011 2024-12-16T17:57:21,954 INFO [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-1 {event_type=RS_FLUSH_REGIONS, pid=52}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/7212a7dec92fa5781081695b56d809ad/A/a5bf7b4ed79e4dd8ae3634df2a20c011, entries=150, sequenceid=317, filesize=30.5 K 2024-12-16T17:57:21,954 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-1 {event_type=RS_FLUSH_REGIONS, pid=52}] regionserver.StoreScanner(1000): StoreScanner already closing. There is no need to updateReaders 2024-12-16T17:57:21,955 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-1 {event_type=RS_FLUSH_REGIONS, pid=52}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/7212a7dec92fa5781081695b56d809ad/.tmp/B/e70f420a9aae40de8dd536ac1f67b4e8 as hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/7212a7dec92fa5781081695b56d809ad/B/e70f420a9aae40de8dd536ac1f67b4e8 2024-12-16T17:57:21,959 INFO [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-1 {event_type=RS_FLUSH_REGIONS, pid=52}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/7212a7dec92fa5781081695b56d809ad/B/e70f420a9aae40de8dd536ac1f67b4e8, entries=150, sequenceid=317, filesize=12.0 K 2024-12-16T17:57:21,960 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-1 {event_type=RS_FLUSH_REGIONS, pid=52}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/7212a7dec92fa5781081695b56d809ad/.tmp/C/ab5f11a8d0a04df98673fed99afc69ed as hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/7212a7dec92fa5781081695b56d809ad/C/ab5f11a8d0a04df98673fed99afc69ed 2024-12-16T17:57:21,965 INFO [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-1 {event_type=RS_FLUSH_REGIONS, pid=52}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/7212a7dec92fa5781081695b56d809ad/C/ab5f11a8d0a04df98673fed99afc69ed, entries=150, sequenceid=317, filesize=12.0 K 2024-12-16T17:57:21,966 INFO [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-1 {event_type=RS_FLUSH_REGIONS, pid=52}] regionserver.HRegion(3040): Finished flush of dataSize ~114.05 KB/116790, heapSize ~299.53 KB/306720, currentSize=87.22 KB/89310 for 7212a7dec92fa5781081695b56d809ad in 873ms, sequenceid=317, compaction requested=false 2024-12-16T17:57:21,966 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-1 {event_type=RS_FLUSH_REGIONS, pid=52}] regionserver.HRegion(2538): Flush status journal for 7212a7dec92fa5781081695b56d809ad: 2024-12-16T17:57:21,967 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-1 
{event_type=RS_FLUSH_REGIONS, pid=52}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1734371821088.7212a7dec92fa5781081695b56d809ad. 2024-12-16T17:57:21,967 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-1 {event_type=RS_FLUSH_REGIONS, pid=52}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=52 2024-12-16T17:57:21,967 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38367 {}] master.HMaster(4106): Remote procedure done, pid=52 2024-12-16T17:57:21,970 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=52, resume processing ppid=51 2024-12-16T17:57:21,970 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=52, ppid=51, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 1.0280 sec 2024-12-16T17:57:21,971 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=51, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=51, table=TestAcidGuarantees in 1.0320 sec 2024-12-16T17:57:22,023 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39733 {}] regionserver.HRegion(8581): Flush requested on 7212a7dec92fa5781081695b56d809ad 2024-12-16T17:57:22,023 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 7212a7dec92fa5781081695b56d809ad 3/3 column families, dataSize=107.34 KB heapSize=282 KB 2024-12-16T17:57:22,024 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 7212a7dec92fa5781081695b56d809ad, store=A 2024-12-16T17:57:22,024 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-16T17:57:22,024 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 7212a7dec92fa5781081695b56d809ad, store=B 2024-12-16T17:57:22,024 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-16T17:57:22,024 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 7212a7dec92fa5781081695b56d809ad, store=C 2024-12-16T17:57:22,024 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-16T17:57:22,032 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202412163f7e259435934a7ebc5dd74ce6c8fbad_7212a7dec92fa5781081695b56d809ad is 50, key is test_row_0/A:col10/1734371841410/Put/seqid=0 2024-12-16T17:57:22,036 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41817 is added to blk_1073742056_1232 (size=14994) 2024-12-16T17:57:22,037 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39733 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7212a7dec92fa5781081695b56d809ad, server=3609ad07831c,39733,1734371789085 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-16T17:57:22,037 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39733 {}] ipc.CallRunner(138): callId: 175 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41586 deadline: 1734371902031, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7212a7dec92fa5781081695b56d809ad, server=3609ad07831c,39733,1734371789085 2024-12-16T17:57:22,037 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39733 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7212a7dec92fa5781081695b56d809ad, server=3609ad07831c,39733,1734371789085 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-16T17:57:22,037 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39733 {}] ipc.CallRunner(138): callId: 166 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41652 deadline: 1734371902033, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7212a7dec92fa5781081695b56d809ad, server=3609ad07831c,39733,1734371789085 2024-12-16T17:57:22,037 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39733 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7212a7dec92fa5781081695b56d809ad, server=3609ad07831c,39733,1734371789085 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-16T17:57:22,038 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39733 {}] ipc.CallRunner(138): callId: 176 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41612 deadline: 1734371902035, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7212a7dec92fa5781081695b56d809ad, server=3609ad07831c,39733,1734371789085 2024-12-16T17:57:22,039 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39733 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7212a7dec92fa5781081695b56d809ad, server=3609ad07831c,39733,1734371789085 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-16T17:57:22,039 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39733 {}] ipc.CallRunner(138): callId: 177 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41628 deadline: 1734371902036, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7212a7dec92fa5781081695b56d809ad, server=3609ad07831c,39733,1734371789085 2024-12-16T17:57:22,039 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39733 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7212a7dec92fa5781081695b56d809ad, server=3609ad07831c,39733,1734371789085 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-16T17:57:22,039 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39733 {}] ipc.CallRunner(138): callId: 174 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41642 deadline: 1734371902037, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7212a7dec92fa5781081695b56d809ad, server=3609ad07831c,39733,1734371789085 2024-12-16T17:57:22,042 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38367 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=51 2024-12-16T17:57:22,042 INFO [Thread-705 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 51 completed 2024-12-16T17:57:22,043 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38367 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-12-16T17:57:22,044 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38367 {}] procedure2.ProcedureExecutor(1098): Stored pid=53, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=53, table=TestAcidGuarantees 2024-12-16T17:57:22,045 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38367 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=53 2024-12-16T17:57:22,045 INFO [PEWorker-4 {}] procedure.FlushTableProcedure(91): pid=53, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=53, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-12-16T17:57:22,045 INFO [PEWorker-4 {}] procedure.FlushTableProcedure(91): pid=53, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=53, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-12-16T17:57:22,045 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=54, ppid=53, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-12-16T17:57:22,140 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39733 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7212a7dec92fa5781081695b56d809ad, server=3609ad07831c,39733,1734371789085 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-16T17:57:22,140 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39733 {}] ipc.CallRunner(138): callId: 177 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41586 deadline: 1734371902138, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7212a7dec92fa5781081695b56d809ad, server=3609ad07831c,39733,1734371789085 2024-12-16T17:57:22,140 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39733 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7212a7dec92fa5781081695b56d809ad, server=3609ad07831c,39733,1734371789085 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-16T17:57:22,141 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39733 {}] ipc.CallRunner(138): callId: 168 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41652 deadline: 1734371902138, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7212a7dec92fa5781081695b56d809ad, server=3609ad07831c,39733,1734371789085 2024-12-16T17:57:22,141 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39733 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7212a7dec92fa5781081695b56d809ad, server=3609ad07831c,39733,1734371789085 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-16T17:57:22,141 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39733 {}] ipc.CallRunner(138): callId: 178 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41612 deadline: 1734371902138, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7212a7dec92fa5781081695b56d809ad, server=3609ad07831c,39733,1734371789085 2024-12-16T17:57:22,141 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39733 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7212a7dec92fa5781081695b56d809ad, server=3609ad07831c,39733,1734371789085 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-16T17:57:22,141 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39733 {}] ipc.CallRunner(138): callId: 179 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41628 deadline: 1734371902140, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7212a7dec92fa5781081695b56d809ad, server=3609ad07831c,39733,1734371789085 2024-12-16T17:57:22,141 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39733 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7212a7dec92fa5781081695b56d809ad, server=3609ad07831c,39733,1734371789085 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-16T17:57:22,141 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39733 {}] ipc.CallRunner(138): callId: 176 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41642 deadline: 1734371902140, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7212a7dec92fa5781081695b56d809ad, server=3609ad07831c,39733,1734371789085 2024-12-16T17:57:22,145 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38367 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=53 2024-12-16T17:57:22,196 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 3609ad07831c,39733,1734371789085 2024-12-16T17:57:22,197 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=39733 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=54 2024-12-16T17:57:22,197 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-2 {event_type=RS_FLUSH_REGIONS, pid=54}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1734371821088.7212a7dec92fa5781081695b56d809ad. 2024-12-16T17:57:22,197 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-2 {event_type=RS_FLUSH_REGIONS, pid=54}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1734371821088.7212a7dec92fa5781081695b56d809ad. as already flushing 2024-12-16T17:57:22,197 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-2 {event_type=RS_FLUSH_REGIONS, pid=54}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1734371821088.7212a7dec92fa5781081695b56d809ad. 2024-12-16T17:57:22,197 ERROR [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-2 {event_type=RS_FLUSH_REGIONS, pid=54}] handler.RSProcedureHandler(58): pid=54 java.io.IOException: Unable to complete flush {ENCODED => 7212a7dec92fa5781081695b56d809ad, NAME => 'TestAcidGuarantees,,1734371821088.7212a7dec92fa5781081695b56d809ad.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-16T17:57:22,197 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-2 {event_type=RS_FLUSH_REGIONS, pid=54}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=54 java.io.IOException: Unable to complete flush {ENCODED => 7212a7dec92fa5781081695b56d809ad, NAME => 'TestAcidGuarantees,,1734371821088.7212a7dec92fa5781081695b56d809ad.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-16T17:57:22,198 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38367 {}] master.HMaster(4114): Remote procedure failed, pid=54 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 7212a7dec92fa5781081695b56d809ad, NAME => 'TestAcidGuarantees,,1734371821088.7212a7dec92fa5781081695b56d809ad.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 7212a7dec92fa5781081695b56d809ad, NAME => 'TestAcidGuarantees,,1734371821088.7212a7dec92fa5781081695b56d809ad.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-16T17:57:22,343 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39733 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7212a7dec92fa5781081695b56d809ad, server=3609ad07831c,39733,1734371789085 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-16T17:57:22,343 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39733 {}] ipc.CallRunner(138): callId: 181 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41628 deadline: 1734371902342, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7212a7dec92fa5781081695b56d809ad, server=3609ad07831c,39733,1734371789085 2024-12-16T17:57:22,343 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39733 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7212a7dec92fa5781081695b56d809ad, server=3609ad07831c,39733,1734371789085 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-16T17:57:22,343 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39733 {}] ipc.CallRunner(138): callId: 179 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41586 deadline: 1734371902342, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7212a7dec92fa5781081695b56d809ad, server=3609ad07831c,39733,1734371789085 2024-12-16T17:57:22,343 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39733 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7212a7dec92fa5781081695b56d809ad, server=3609ad07831c,39733,1734371789085 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-16T17:57:22,344 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39733 {}] ipc.CallRunner(138): callId: 180 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41612 deadline: 1734371902342, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7212a7dec92fa5781081695b56d809ad, server=3609ad07831c,39733,1734371789085 2024-12-16T17:57:22,344 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39733 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7212a7dec92fa5781081695b56d809ad, server=3609ad07831c,39733,1734371789085 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-16T17:57:22,344 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39733 {}] ipc.CallRunner(138): callId: 178 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41642 deadline: 1734371902343, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7212a7dec92fa5781081695b56d809ad, server=3609ad07831c,39733,1734371789085 2024-12-16T17:57:22,344 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39733 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7212a7dec92fa5781081695b56d809ad, server=3609ad07831c,39733,1734371789085 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-16T17:57:22,344 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39733 {}] ipc.CallRunner(138): callId: 170 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41652 deadline: 1734371902343, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7212a7dec92fa5781081695b56d809ad, server=3609ad07831c,39733,1734371789085 2024-12-16T17:57:22,346 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38367 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=53 2024-12-16T17:57:22,349 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 3609ad07831c,39733,1734371789085 2024-12-16T17:57:22,349 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=39733 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=54 2024-12-16T17:57:22,349 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-0 {event_type=RS_FLUSH_REGIONS, pid=54}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1734371821088.7212a7dec92fa5781081695b56d809ad. 2024-12-16T17:57:22,349 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-0 {event_type=RS_FLUSH_REGIONS, pid=54}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1734371821088.7212a7dec92fa5781081695b56d809ad. as already flushing 2024-12-16T17:57:22,349 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-0 {event_type=RS_FLUSH_REGIONS, pid=54}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1734371821088.7212a7dec92fa5781081695b56d809ad. 2024-12-16T17:57:22,349 ERROR [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-0 {event_type=RS_FLUSH_REGIONS, pid=54}] handler.RSProcedureHandler(58): pid=54 java.io.IOException: Unable to complete flush {ENCODED => 7212a7dec92fa5781081695b56d809ad, NAME => 'TestAcidGuarantees,,1734371821088.7212a7dec92fa5781081695b56d809ad.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-16T17:57:22,349 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-0 {event_type=RS_FLUSH_REGIONS, pid=54}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=54 java.io.IOException: Unable to complete flush {ENCODED => 7212a7dec92fa5781081695b56d809ad, NAME => 'TestAcidGuarantees,,1734371821088.7212a7dec92fa5781081695b56d809ad.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-16T17:57:22,350 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38367 {}] master.HMaster(4114): Remote procedure failed, pid=54 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 7212a7dec92fa5781081695b56d809ad, NAME => 'TestAcidGuarantees,,1734371821088.7212a7dec92fa5781081695b56d809ad.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 7212a7dec92fa5781081695b56d809ad, NAME => 'TestAcidGuarantees,,1734371821088.7212a7dec92fa5781081695b56d809ad.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-16T17:57:22,436 DEBUG [MemStoreFlusher.0 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-16T17:57:22,440 INFO [MemStoreFlusher.0 {}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202412163f7e259435934a7ebc5dd74ce6c8fbad_7212a7dec92fa5781081695b56d809ad to hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202412163f7e259435934a7ebc5dd74ce6c8fbad_7212a7dec92fa5781081695b56d809ad 2024-12-16T17:57:22,441 DEBUG [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/7212a7dec92fa5781081695b56d809ad/.tmp/A/ba3c02dba81a4260a9930a26606e3f2b, store: [table=TestAcidGuarantees family=A region=7212a7dec92fa5781081695b56d809ad] 2024-12-16T17:57:22,442 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/7212a7dec92fa5781081695b56d809ad/.tmp/A/ba3c02dba81a4260a9930a26606e3f2b is 175, key is test_row_0/A:col10/1734371841410/Put/seqid=0 2024-12-16T17:57:22,447 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41817 is added to blk_1073742057_1233 (size=39949) 2024-12-16T17:57:22,501 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 3609ad07831c,39733,1734371789085 2024-12-16T17:57:22,501 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=39733 {}] 
regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=54 2024-12-16T17:57:22,502 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-1 {event_type=RS_FLUSH_REGIONS, pid=54}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1734371821088.7212a7dec92fa5781081695b56d809ad. 2024-12-16T17:57:22,502 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-1 {event_type=RS_FLUSH_REGIONS, pid=54}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1734371821088.7212a7dec92fa5781081695b56d809ad. as already flushing 2024-12-16T17:57:22,502 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-1 {event_type=RS_FLUSH_REGIONS, pid=54}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1734371821088.7212a7dec92fa5781081695b56d809ad. 2024-12-16T17:57:22,502 ERROR [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-1 {event_type=RS_FLUSH_REGIONS, pid=54}] handler.RSProcedureHandler(58): pid=54 java.io.IOException: Unable to complete flush {ENCODED => 7212a7dec92fa5781081695b56d809ad, NAME => 'TestAcidGuarantees,,1734371821088.7212a7dec92fa5781081695b56d809ad.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-16T17:57:22,502 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-1 {event_type=RS_FLUSH_REGIONS, pid=54}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=54 java.io.IOException: Unable to complete flush {ENCODED => 7212a7dec92fa5781081695b56d809ad, NAME => 'TestAcidGuarantees,,1734371821088.7212a7dec92fa5781081695b56d809ad.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
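The entries above show the region server rejecting client Mutate calls with RegionTooBusyException while region 7212a7dec92fa5781081695b56d809ad is over its 512.0 K blocking memstore limit and a flush is still in flight. For context only, below is a minimal client-side sketch of how such writes might be retried with backoff. It is not part of the test: it assumes a standard HBase 2.x client on the classpath, the class name, cell value, and retry parameters are illustrative, and the table, row, and column names are simply copied from the log output. In practice the HBase client already retries RegionTooBusyException internally, so explicit handling like this is optional.

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.RegionTooBusyException;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;
    import org.apache.hadoop.hbase.client.Put;
    import org.apache.hadoop.hbase.client.Table;
    import org.apache.hadoop.hbase.util.Bytes;

    // Hypothetical helper (not from the test): write one cell and back off while the
    // region reports it is over its memstore limit.
    public class RegionTooBusyRetrySketch {
      public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();
        try (Connection conn = ConnectionFactory.createConnection(conf);
             Table table = conn.getTable(TableName.valueOf("TestAcidGuarantees"))) {
          Put put = new Put(Bytes.toBytes("test_row_0"));
          put.addColumn(Bytes.toBytes("A"), Bytes.toBytes("col10"), Bytes.toBytes("value"));
          long backoffMs = 100L;
          for (int attempt = 1; attempt <= 5; attempt++) {
            try {
              table.put(put);          // may surface RegionTooBusyException once client retries are exhausted
              break;
            } catch (RegionTooBusyException e) {
              Thread.sleep(backoffMs); // give the in-flight flush time to drain the memstore
              backoffMs *= 2;          // exponential backoff before the next attempt
            }
          }
        }
      }
    }

The exponential backoff mirrors what the log shows server-side: the rejected puts succeed again only after the memstore flush for the region completes, so waiting progressively longer between attempts avoids hammering the blocked region.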
2024-12-16T17:57:22,502 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38367 {}] master.HMaster(4114): Remote procedure failed, pid=54 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 7212a7dec92fa5781081695b56d809ad, NAME => 'TestAcidGuarantees,,1734371821088.7212a7dec92fa5781081695b56d809ad.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 7212a7dec92fa5781081695b56d809ad, NAME => 'TestAcidGuarantees,,1734371821088.7212a7dec92fa5781081695b56d809ad.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-16T17:57:22,646 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39733 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7212a7dec92fa5781081695b56d809ad, server=3609ad07831c,39733,1734371789085 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-16T17:57:22,647 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38367 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=53 2024-12-16T17:57:22,647 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39733 {}] ipc.CallRunner(138): callId: 182 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41612 deadline: 1734371902644, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7212a7dec92fa5781081695b56d809ad, server=3609ad07831c,39733,1734371789085 2024-12-16T17:57:22,647 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39733 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7212a7dec92fa5781081695b56d809ad, server=3609ad07831c,39733,1734371789085 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-16T17:57:22,647 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39733 {}] ipc.CallRunner(138): callId: 180 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41642 deadline: 1734371902644, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7212a7dec92fa5781081695b56d809ad, server=3609ad07831c,39733,1734371789085 2024-12-16T17:57:22,648 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39733 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7212a7dec92fa5781081695b56d809ad, server=3609ad07831c,39733,1734371789085 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-16T17:57:22,648 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39733 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7212a7dec92fa5781081695b56d809ad, server=3609ad07831c,39733,1734371789085 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-16T17:57:22,649 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39733 {}] ipc.CallRunner(138): callId: 183 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41628 deadline: 1734371902646, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7212a7dec92fa5781081695b56d809ad, server=3609ad07831c,39733,1734371789085 2024-12-16T17:57:22,649 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39733 {}] ipc.CallRunner(138): callId: 181 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41586 deadline: 1734371902646, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7212a7dec92fa5781081695b56d809ad, server=3609ad07831c,39733,1734371789085 2024-12-16T17:57:22,649 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39733 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7212a7dec92fa5781081695b56d809ad, server=3609ad07831c,39733,1734371789085 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-16T17:57:22,649 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39733 {}] ipc.CallRunner(138): callId: 172 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41652 deadline: 1734371902648, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7212a7dec92fa5781081695b56d809ad, server=3609ad07831c,39733,1734371789085 2024-12-16T17:57:22,654 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 3609ad07831c,39733,1734371789085 2024-12-16T17:57:22,654 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=39733 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=54 2024-12-16T17:57:22,654 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-2 {event_type=RS_FLUSH_REGIONS, pid=54}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1734371821088.7212a7dec92fa5781081695b56d809ad. 2024-12-16T17:57:22,654 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-2 {event_type=RS_FLUSH_REGIONS, pid=54}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1734371821088.7212a7dec92fa5781081695b56d809ad. as already flushing 2024-12-16T17:57:22,654 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-2 {event_type=RS_FLUSH_REGIONS, pid=54}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1734371821088.7212a7dec92fa5781081695b56d809ad. 2024-12-16T17:57:22,655 ERROR [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-2 {event_type=RS_FLUSH_REGIONS, pid=54}] handler.RSProcedureHandler(58): pid=54 java.io.IOException: Unable to complete flush {ENCODED => 7212a7dec92fa5781081695b56d809ad, NAME => 'TestAcidGuarantees,,1734371821088.7212a7dec92fa5781081695b56d809ad.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-16T17:57:22,655 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-2 {event_type=RS_FLUSH_REGIONS, pid=54}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=54 java.io.IOException: Unable to complete flush {ENCODED => 7212a7dec92fa5781081695b56d809ad, NAME => 'TestAcidGuarantees,,1734371821088.7212a7dec92fa5781081695b56d809ad.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-16T17:57:22,655 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38367 {}] master.HMaster(4114): Remote procedure failed, pid=54 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 7212a7dec92fa5781081695b56d809ad, NAME => 'TestAcidGuarantees,,1734371821088.7212a7dec92fa5781081695b56d809ad.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 7212a7dec92fa5781081695b56d809ad, NAME => 'TestAcidGuarantees,,1734371821088.7212a7dec92fa5781081695b56d809ad.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-16T17:57:22,806 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 3609ad07831c,39733,1734371789085 2024-12-16T17:57:22,807 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=39733 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=54 2024-12-16T17:57:22,807 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-0 {event_type=RS_FLUSH_REGIONS, pid=54}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1734371821088.7212a7dec92fa5781081695b56d809ad. 2024-12-16T17:57:22,807 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-0 {event_type=RS_FLUSH_REGIONS, pid=54}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1734371821088.7212a7dec92fa5781081695b56d809ad. as already flushing 2024-12-16T17:57:22,807 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-0 {event_type=RS_FLUSH_REGIONS, pid=54}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1734371821088.7212a7dec92fa5781081695b56d809ad. 2024-12-16T17:57:22,807 ERROR [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-0 {event_type=RS_FLUSH_REGIONS, pid=54}] handler.RSProcedureHandler(58): pid=54 java.io.IOException: Unable to complete flush {ENCODED => 7212a7dec92fa5781081695b56d809ad, NAME => 'TestAcidGuarantees,,1734371821088.7212a7dec92fa5781081695b56d809ad.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-16T17:57:22,807 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-0 {event_type=RS_FLUSH_REGIONS, pid=54}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=54 java.io.IOException: Unable to complete flush {ENCODED => 7212a7dec92fa5781081695b56d809ad, NAME => 'TestAcidGuarantees,,1734371821088.7212a7dec92fa5781081695b56d809ad.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-16T17:57:22,808 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38367 {}] master.HMaster(4114): Remote procedure failed, pid=54 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 7212a7dec92fa5781081695b56d809ad, NAME => 'TestAcidGuarantees,,1734371821088.7212a7dec92fa5781081695b56d809ad.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 7212a7dec92fa5781081695b56d809ad, NAME => 'TestAcidGuarantees,,1734371821088.7212a7dec92fa5781081695b56d809ad.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-16T17:57:22,847 INFO [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=338, memsize=38.0 K, hasBloomFilter=true, into tmp file hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/7212a7dec92fa5781081695b56d809ad/.tmp/A/ba3c02dba81a4260a9930a26606e3f2b 2024-12-16T17:57:22,855 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/7212a7dec92fa5781081695b56d809ad/.tmp/B/6d5e4db6f4434cdc90cc3e2a2bb4bcbc is 50, key is test_row_0/B:col10/1734371841410/Put/seqid=0 2024-12-16T17:57:22,859 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41817 is added to blk_1073742058_1234 (size=12301) 2024-12-16T17:57:22,860 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=38.02 KB at sequenceid=338 (bloomFilter=true), to=hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/7212a7dec92fa5781081695b56d809ad/.tmp/B/6d5e4db6f4434cdc90cc3e2a2bb4bcbc 2024-12-16T17:57:22,878 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/7212a7dec92fa5781081695b56d809ad/.tmp/C/795357db82374053b61402c7bde286c2 is 50, key is test_row_0/C:col10/1734371841410/Put/seqid=0 2024-12-16T17:57:22,905 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41817 is added to blk_1073742059_1235 (size=12301) 2024-12-16T17:57:22,959 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 3609ad07831c,39733,1734371789085 
2024-12-16T17:57:22,961 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=39733 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=54 2024-12-16T17:57:22,961 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-1 {event_type=RS_FLUSH_REGIONS, pid=54}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1734371821088.7212a7dec92fa5781081695b56d809ad. 2024-12-16T17:57:22,961 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-1 {event_type=RS_FLUSH_REGIONS, pid=54}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1734371821088.7212a7dec92fa5781081695b56d809ad. as already flushing 2024-12-16T17:57:22,961 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-1 {event_type=RS_FLUSH_REGIONS, pid=54}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1734371821088.7212a7dec92fa5781081695b56d809ad. 2024-12-16T17:57:22,961 ERROR [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-1 {event_type=RS_FLUSH_REGIONS, pid=54}] handler.RSProcedureHandler(58): pid=54 java.io.IOException: Unable to complete flush {ENCODED => 7212a7dec92fa5781081695b56d809ad, NAME => 'TestAcidGuarantees,,1734371821088.7212a7dec92fa5781081695b56d809ad.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-16T17:57:22,962 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-1 {event_type=RS_FLUSH_REGIONS, pid=54}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=54 java.io.IOException: Unable to complete flush {ENCODED => 7212a7dec92fa5781081695b56d809ad, NAME => 'TestAcidGuarantees,,1734371821088.7212a7dec92fa5781081695b56d809ad.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-16T17:57:22,962 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38367 {}] master.HMaster(4114): Remote procedure failed, pid=54 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 7212a7dec92fa5781081695b56d809ad, NAME => 'TestAcidGuarantees,,1734371821088.7212a7dec92fa5781081695b56d809ad.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 7212a7dec92fa5781081695b56d809ad, NAME => 'TestAcidGuarantees,,1734371821088.7212a7dec92fa5781081695b56d809ad.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-16T17:57:23,117 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 3609ad07831c,39733,1734371789085 2024-12-16T17:57:23,118 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=39733 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=54 2024-12-16T17:57:23,118 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-2 {event_type=RS_FLUSH_REGIONS, pid=54}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1734371821088.7212a7dec92fa5781081695b56d809ad. 2024-12-16T17:57:23,118 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-2 {event_type=RS_FLUSH_REGIONS, pid=54}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1734371821088.7212a7dec92fa5781081695b56d809ad. 
as already flushing 2024-12-16T17:57:23,118 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-2 {event_type=RS_FLUSH_REGIONS, pid=54}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1734371821088.7212a7dec92fa5781081695b56d809ad. 2024-12-16T17:57:23,118 ERROR [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-2 {event_type=RS_FLUSH_REGIONS, pid=54}] handler.RSProcedureHandler(58): pid=54 java.io.IOException: Unable to complete flush {ENCODED => 7212a7dec92fa5781081695b56d809ad, NAME => 'TestAcidGuarantees,,1734371821088.7212a7dec92fa5781081695b56d809ad.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-16T17:57:23,118 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-2 {event_type=RS_FLUSH_REGIONS, pid=54}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=54 java.io.IOException: Unable to complete flush {ENCODED => 7212a7dec92fa5781081695b56d809ad, NAME => 'TestAcidGuarantees,,1734371821088.7212a7dec92fa5781081695b56d809ad.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-16T17:57:23,119 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38367 {}] master.HMaster(4114): Remote procedure failed, pid=54 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 7212a7dec92fa5781081695b56d809ad, NAME => 'TestAcidGuarantees,,1734371821088.7212a7dec92fa5781081695b56d809ad.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] 
at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 7212a7dec92fa5781081695b56d809ad, NAME => 'TestAcidGuarantees,,1734371821088.7212a7dec92fa5781081695b56d809ad.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-16T17:57:23,147 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38367 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=53 2024-12-16T17:57:23,148 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39733 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7212a7dec92fa5781081695b56d809ad, server=3609ad07831c,39733,1734371789085 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-16T17:57:23,149 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39733 {}] ipc.CallRunner(138): callId: 184 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41612 deadline: 1734371903148, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7212a7dec92fa5781081695b56d809ad, server=3609ad07831c,39733,1734371789085 2024-12-16T17:57:23,151 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39733 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7212a7dec92fa5781081695b56d809ad, server=3609ad07831c,39733,1734371789085 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-16T17:57:23,151 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39733 {}] ipc.CallRunner(138): callId: 182 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41642 deadline: 1734371903150, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7212a7dec92fa5781081695b56d809ad, server=3609ad07831c,39733,1734371789085 2024-12-16T17:57:23,152 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39733 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7212a7dec92fa5781081695b56d809ad, server=3609ad07831c,39733,1734371789085 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-16T17:57:23,152 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39733 {}] ipc.CallRunner(138): callId: 185 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41628 deadline: 1734371903151, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7212a7dec92fa5781081695b56d809ad, server=3609ad07831c,39733,1734371789085 2024-12-16T17:57:23,154 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39733 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7212a7dec92fa5781081695b56d809ad, server=3609ad07831c,39733,1734371789085 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-16T17:57:23,154 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39733 {}] ipc.CallRunner(138): callId: 183 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41586 deadline: 1734371903152, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7212a7dec92fa5781081695b56d809ad, server=3609ad07831c,39733,1734371789085 2024-12-16T17:57:23,157 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39733 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7212a7dec92fa5781081695b56d809ad, server=3609ad07831c,39733,1734371789085 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-16T17:57:23,157 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39733 {}] ipc.CallRunner(138): callId: 174 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41652 deadline: 1734371903155, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7212a7dec92fa5781081695b56d809ad, server=3609ad07831c,39733,1734371789085 2024-12-16T17:57:23,270 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 3609ad07831c,39733,1734371789085 2024-12-16T17:57:23,270 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=39733 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=54 2024-12-16T17:57:23,271 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-0 {event_type=RS_FLUSH_REGIONS, pid=54}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1734371821088.7212a7dec92fa5781081695b56d809ad. 2024-12-16T17:57:23,271 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-0 {event_type=RS_FLUSH_REGIONS, pid=54}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1734371821088.7212a7dec92fa5781081695b56d809ad. as already flushing 2024-12-16T17:57:23,271 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-0 {event_type=RS_FLUSH_REGIONS, pid=54}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1734371821088.7212a7dec92fa5781081695b56d809ad. 2024-12-16T17:57:23,271 ERROR [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-0 {event_type=RS_FLUSH_REGIONS, pid=54}] handler.RSProcedureHandler(58): pid=54 java.io.IOException: Unable to complete flush {ENCODED => 7212a7dec92fa5781081695b56d809ad, NAME => 'TestAcidGuarantees,,1734371821088.7212a7dec92fa5781081695b56d809ad.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-16T17:57:23,271 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-0 {event_type=RS_FLUSH_REGIONS, pid=54}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=54 java.io.IOException: Unable to complete flush {ENCODED => 7212a7dec92fa5781081695b56d809ad, NAME => 'TestAcidGuarantees,,1734371821088.7212a7dec92fa5781081695b56d809ad.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-16T17:57:23,271 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38367 {}] master.HMaster(4114): Remote procedure failed, pid=54 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 7212a7dec92fa5781081695b56d809ad, NAME => 'TestAcidGuarantees,,1734371821088.7212a7dec92fa5781081695b56d809ad.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 7212a7dec92fa5781081695b56d809ad, NAME => 'TestAcidGuarantees,,1734371821088.7212a7dec92fa5781081695b56d809ad.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-16T17:57:23,305 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=38.02 KB at sequenceid=338 (bloomFilter=true), to=hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/7212a7dec92fa5781081695b56d809ad/.tmp/C/795357db82374053b61402c7bde286c2 2024-12-16T17:57:23,310 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/7212a7dec92fa5781081695b56d809ad/.tmp/A/ba3c02dba81a4260a9930a26606e3f2b as hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/7212a7dec92fa5781081695b56d809ad/A/ba3c02dba81a4260a9930a26606e3f2b 2024-12-16T17:57:23,315 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/7212a7dec92fa5781081695b56d809ad/A/ba3c02dba81a4260a9930a26606e3f2b, entries=200, sequenceid=338, filesize=39.0 K 2024-12-16T17:57:23,317 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/7212a7dec92fa5781081695b56d809ad/.tmp/B/6d5e4db6f4434cdc90cc3e2a2bb4bcbc as hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/7212a7dec92fa5781081695b56d809ad/B/6d5e4db6f4434cdc90cc3e2a2bb4bcbc 2024-12-16T17:57:23,322 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/7212a7dec92fa5781081695b56d809ad/B/6d5e4db6f4434cdc90cc3e2a2bb4bcbc, entries=150, 
sequenceid=338, filesize=12.0 K 2024-12-16T17:57:23,323 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/7212a7dec92fa5781081695b56d809ad/.tmp/C/795357db82374053b61402c7bde286c2 as hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/7212a7dec92fa5781081695b56d809ad/C/795357db82374053b61402c7bde286c2 2024-12-16T17:57:23,326 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/7212a7dec92fa5781081695b56d809ad/C/795357db82374053b61402c7bde286c2, entries=150, sequenceid=338, filesize=12.0 K 2024-12-16T17:57:23,328 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~114.05 KB/116790, heapSize ~299.53 KB/306720, currentSize=87.22 KB/89310 for 7212a7dec92fa5781081695b56d809ad in 1304ms, sequenceid=338, compaction requested=true 2024-12-16T17:57:23,328 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 7212a7dec92fa5781081695b56d809ad: 2024-12-16T17:57:23,328 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 7212a7dec92fa5781081695b56d809ad:A, priority=-2147483648, current under compaction store size is 1 2024-12-16T17:57:23,328 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-16T17:57:23,328 DEBUG [RS:0;3609ad07831c:39733-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-16T17:57:23,328 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 7212a7dec92fa5781081695b56d809ad:B, priority=-2147483648, current under compaction store size is 2 2024-12-16T17:57:23,328 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-16T17:57:23,328 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 7212a7dec92fa5781081695b56d809ad:C, priority=-2147483648, current under compaction store size is 3 2024-12-16T17:57:23,328 DEBUG [RS:0;3609ad07831c:39733-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-16T17:57:23,328 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-16T17:57:23,329 DEBUG [RS:0;3609ad07831c:39733-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 103141 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-16T17:57:23,329 DEBUG [RS:0;3609ad07831c:39733-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 37585 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-16T17:57:23,329 DEBUG [RS:0;3609ad07831c:39733-shortCompactions-0 {}] 
regionserver.HStore(1540): 7212a7dec92fa5781081695b56d809ad/A is initiating minor compaction (all files) 2024-12-16T17:57:23,329 DEBUG [RS:0;3609ad07831c:39733-longCompactions-0 {}] regionserver.HStore(1540): 7212a7dec92fa5781081695b56d809ad/B is initiating minor compaction (all files) 2024-12-16T17:57:23,329 INFO [RS:0;3609ad07831c:39733-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 7212a7dec92fa5781081695b56d809ad/B in TestAcidGuarantees,,1734371821088.7212a7dec92fa5781081695b56d809ad. 2024-12-16T17:57:23,329 INFO [RS:0;3609ad07831c:39733-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 7212a7dec92fa5781081695b56d809ad/A in TestAcidGuarantees,,1734371821088.7212a7dec92fa5781081695b56d809ad. 2024-12-16T17:57:23,329 INFO [RS:0;3609ad07831c:39733-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/7212a7dec92fa5781081695b56d809ad/B/1e5884c4e699444f9a1263d02c2b0d48, hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/7212a7dec92fa5781081695b56d809ad/B/e70f420a9aae40de8dd536ac1f67b4e8, hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/7212a7dec92fa5781081695b56d809ad/B/6d5e4db6f4434cdc90cc3e2a2bb4bcbc] into tmpdir=hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/7212a7dec92fa5781081695b56d809ad/.tmp, totalSize=36.7 K 2024-12-16T17:57:23,329 INFO [RS:0;3609ad07831c:39733-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/7212a7dec92fa5781081695b56d809ad/A/e7264d83d5c94bc68e56beda8deb1876, hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/7212a7dec92fa5781081695b56d809ad/A/a5bf7b4ed79e4dd8ae3634df2a20c011, hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/7212a7dec92fa5781081695b56d809ad/A/ba3c02dba81a4260a9930a26606e3f2b] into tmpdir=hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/7212a7dec92fa5781081695b56d809ad/.tmp, totalSize=100.7 K 2024-12-16T17:57:23,329 INFO [RS:0;3609ad07831c:39733-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(181): MOB compaction: major=false isAll=true priority=13 throughput controller=DefaultCompactionThroughputController [maxThroughput=50.00 MB/second, activeCompactions=0] table=TestAcidGuarantees cf=A region=TestAcidGuarantees,,1734371821088.7212a7dec92fa5781081695b56d809ad. 2024-12-16T17:57:23,329 DEBUG [RS:0;3609ad07831c:39733-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(191): MOB compaction table=TestAcidGuarantees cf=A region=TestAcidGuarantees,,1734371821088.7212a7dec92fa5781081695b56d809ad. 
files: [hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/7212a7dec92fa5781081695b56d809ad/A/e7264d83d5c94bc68e56beda8deb1876, hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/7212a7dec92fa5781081695b56d809ad/A/a5bf7b4ed79e4dd8ae3634df2a20c011, hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/7212a7dec92fa5781081695b56d809ad/A/ba3c02dba81a4260a9930a26606e3f2b] 2024-12-16T17:57:23,329 DEBUG [RS:0;3609ad07831c:39733-longCompactions-0 {}] compactions.Compactor(224): Compacting 1e5884c4e699444f9a1263d02c2b0d48, keycount=150, bloomtype=ROW, size=12.7 K, encoding=NONE, compression=NONE, seqNum=295, earliestPutTs=1734371838955 2024-12-16T17:57:23,329 DEBUG [RS:0;3609ad07831c:39733-shortCompactions-0 {}] compactions.Compactor(224): Compacting e7264d83d5c94bc68e56beda8deb1876, keycount=150, bloomtype=ROW, size=31.2 K, encoding=NONE, compression=NONE, seqNum=295, earliestPutTs=1734371838955 2024-12-16T17:57:23,330 DEBUG [RS:0;3609ad07831c:39733-longCompactions-0 {}] compactions.Compactor(224): Compacting e70f420a9aae40de8dd536ac1f67b4e8, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=317, earliestPutTs=1734371839281 2024-12-16T17:57:23,330 DEBUG [RS:0;3609ad07831c:39733-shortCompactions-0 {}] compactions.Compactor(224): Compacting a5bf7b4ed79e4dd8ae3634df2a20c011, keycount=150, bloomtype=ROW, size=30.5 K, encoding=NONE, compression=NONE, seqNum=317, earliestPutTs=1734371839281 2024-12-16T17:57:23,330 DEBUG [RS:0;3609ad07831c:39733-longCompactions-0 {}] compactions.Compactor(224): Compacting 6d5e4db6f4434cdc90cc3e2a2bb4bcbc, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=338, earliestPutTs=1734371841410 2024-12-16T17:57:23,330 DEBUG [RS:0;3609ad07831c:39733-shortCompactions-0 {}] compactions.Compactor(224): Compacting ba3c02dba81a4260a9930a26606e3f2b, keycount=200, bloomtype=ROW, size=39.0 K, encoding=NONE, compression=NONE, seqNum=338, earliestPutTs=1734371841410 2024-12-16T17:57:23,336 INFO [RS:0;3609ad07831c:39733-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 7212a7dec92fa5781081695b56d809ad#B#compaction#195 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-12-16T17:57:23,337 DEBUG [RS:0;3609ad07831c:39733-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/7212a7dec92fa5781081695b56d809ad/.tmp/B/ef1811e997a6470a98d2089a9e7924e3 is 50, key is test_row_0/B:col10/1734371841410/Put/seqid=0 2024-12-16T17:57:23,338 INFO [RS:0;3609ad07831c:39733-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(322): Compact MOB=false optimized configured=false optimized enabled=false maximum MOB file size=1073741824 major=true store=[table=TestAcidGuarantees family=A region=7212a7dec92fa5781081695b56d809ad] 2024-12-16T17:57:23,341 DEBUG [RS:0;3609ad07831c:39733-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(626): New MOB writer created=d41d8cd98f00b204e9800998ecf8427e202412165ec16840fde3437b82638272851aca79_7212a7dec92fa5781081695b56d809ad store=[table=TestAcidGuarantees family=A region=7212a7dec92fa5781081695b56d809ad] 2024-12-16T17:57:23,343 DEBUG [RS:0;3609ad07831c:39733-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(647): Commit or abort size=0 mobCells=0 major=true file=d41d8cd98f00b204e9800998ecf8427e202412165ec16840fde3437b82638272851aca79_7212a7dec92fa5781081695b56d809ad, store=[table=TestAcidGuarantees family=A region=7212a7dec92fa5781081695b56d809ad] 2024-12-16T17:57:23,343 DEBUG [RS:0;3609ad07831c:39733-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(658): Aborting writer for hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202412165ec16840fde3437b82638272851aca79_7212a7dec92fa5781081695b56d809ad because there are no MOB cells, store=[table=TestAcidGuarantees family=A region=7212a7dec92fa5781081695b56d809ad] 2024-12-16T17:57:23,344 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41817 is added to blk_1073742060_1236 (size=13085) 2024-12-16T17:57:23,351 DEBUG [RS:0;3609ad07831c:39733-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/7212a7dec92fa5781081695b56d809ad/.tmp/B/ef1811e997a6470a98d2089a9e7924e3 as hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/7212a7dec92fa5781081695b56d809ad/B/ef1811e997a6470a98d2089a9e7924e3 2024-12-16T17:57:23,356 INFO [RS:0;3609ad07831c:39733-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 7212a7dec92fa5781081695b56d809ad/B of 7212a7dec92fa5781081695b56d809ad into ef1811e997a6470a98d2089a9e7924e3(size=12.8 K), total size for store is 12.8 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-12-16T17:57:23,356 DEBUG [RS:0;3609ad07831c:39733-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 7212a7dec92fa5781081695b56d809ad: 2024-12-16T17:57:23,357 INFO [RS:0;3609ad07831c:39733-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1734371821088.7212a7dec92fa5781081695b56d809ad., storeName=7212a7dec92fa5781081695b56d809ad/B, priority=13, startTime=1734371843328; duration=0sec 2024-12-16T17:57:23,357 DEBUG [RS:0;3609ad07831c:39733-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-16T17:57:23,357 DEBUG [RS:0;3609ad07831c:39733-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 7212a7dec92fa5781081695b56d809ad:B 2024-12-16T17:57:23,357 DEBUG [RS:0;3609ad07831c:39733-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-16T17:57:23,360 DEBUG [RS:0;3609ad07831c:39733-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 37585 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-16T17:57:23,360 DEBUG [RS:0;3609ad07831c:39733-longCompactions-0 {}] regionserver.HStore(1540): 7212a7dec92fa5781081695b56d809ad/C is initiating minor compaction (all files) 2024-12-16T17:57:23,360 INFO [RS:0;3609ad07831c:39733-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 7212a7dec92fa5781081695b56d809ad/C in TestAcidGuarantees,,1734371821088.7212a7dec92fa5781081695b56d809ad. 2024-12-16T17:57:23,360 INFO [RS:0;3609ad07831c:39733-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/7212a7dec92fa5781081695b56d809ad/C/bae7a6d695514c2dbedcb0fc412faf2a, hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/7212a7dec92fa5781081695b56d809ad/C/ab5f11a8d0a04df98673fed99afc69ed, hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/7212a7dec92fa5781081695b56d809ad/C/795357db82374053b61402c7bde286c2] into tmpdir=hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/7212a7dec92fa5781081695b56d809ad/.tmp, totalSize=36.7 K 2024-12-16T17:57:23,360 DEBUG [RS:0;3609ad07831c:39733-longCompactions-0 {}] compactions.Compactor(224): Compacting bae7a6d695514c2dbedcb0fc412faf2a, keycount=150, bloomtype=ROW, size=12.7 K, encoding=NONE, compression=NONE, seqNum=295, earliestPutTs=1734371838955 2024-12-16T17:57:23,361 DEBUG [RS:0;3609ad07831c:39733-longCompactions-0 {}] compactions.Compactor(224): Compacting ab5f11a8d0a04df98673fed99afc69ed, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=317, earliestPutTs=1734371839281 2024-12-16T17:57:23,361 DEBUG [RS:0;3609ad07831c:39733-longCompactions-0 {}] compactions.Compactor(224): Compacting 795357db82374053b61402c7bde286c2, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=338, earliestPutTs=1734371841410 2024-12-16T17:57:23,372 INFO [RS:0;3609ad07831c:39733-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 
7212a7dec92fa5781081695b56d809ad#C#compaction#197 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 1 active operations remaining, total limit is 50.00 MB/second 2024-12-16T17:57:23,373 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41817 is added to blk_1073742061_1237 (size=4469) 2024-12-16T17:57:23,373 DEBUG [RS:0;3609ad07831c:39733-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/7212a7dec92fa5781081695b56d809ad/.tmp/C/83de57082e67492688a44efacb4b1aea is 50, key is test_row_0/C:col10/1734371841410/Put/seqid=0 2024-12-16T17:57:23,387 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41817 is added to blk_1073742062_1238 (size=13085) 2024-12-16T17:57:23,394 DEBUG [RS:0;3609ad07831c:39733-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/7212a7dec92fa5781081695b56d809ad/.tmp/C/83de57082e67492688a44efacb4b1aea as hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/7212a7dec92fa5781081695b56d809ad/C/83de57082e67492688a44efacb4b1aea 2024-12-16T17:57:23,399 INFO [RS:0;3609ad07831c:39733-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 7212a7dec92fa5781081695b56d809ad/C of 7212a7dec92fa5781081695b56d809ad into 83de57082e67492688a44efacb4b1aea(size=12.8 K), total size for store is 12.8 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-12-16T17:57:23,399 DEBUG [RS:0;3609ad07831c:39733-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 7212a7dec92fa5781081695b56d809ad: 2024-12-16T17:57:23,399 INFO [RS:0;3609ad07831c:39733-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1734371821088.7212a7dec92fa5781081695b56d809ad., storeName=7212a7dec92fa5781081695b56d809ad/C, priority=13, startTime=1734371843328; duration=0sec 2024-12-16T17:57:23,399 DEBUG [RS:0;3609ad07831c:39733-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-16T17:57:23,399 DEBUG [RS:0;3609ad07831c:39733-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 7212a7dec92fa5781081695b56d809ad:C 2024-12-16T17:57:23,422 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 3609ad07831c,39733,1734371789085 2024-12-16T17:57:23,422 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=39733 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=54 2024-12-16T17:57:23,422 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-1 {event_type=RS_FLUSH_REGIONS, pid=54}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1734371821088.7212a7dec92fa5781081695b56d809ad. 
2024-12-16T17:57:23,423 INFO [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-1 {event_type=RS_FLUSH_REGIONS, pid=54}] regionserver.HRegion(2837): Flushing 7212a7dec92fa5781081695b56d809ad 3/3 column families, dataSize=87.22 KB heapSize=229.27 KB 2024-12-16T17:57:23,423 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-1 {event_type=RS_FLUSH_REGIONS, pid=54}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 7212a7dec92fa5781081695b56d809ad, store=A 2024-12-16T17:57:23,423 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-1 {event_type=RS_FLUSH_REGIONS, pid=54}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-16T17:57:23,423 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-1 {event_type=RS_FLUSH_REGIONS, pid=54}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 7212a7dec92fa5781081695b56d809ad, store=B 2024-12-16T17:57:23,423 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-1 {event_type=RS_FLUSH_REGIONS, pid=54}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-16T17:57:23,423 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-1 {event_type=RS_FLUSH_REGIONS, pid=54}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 7212a7dec92fa5781081695b56d809ad, store=C 2024-12-16T17:57:23,423 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-1 {event_type=RS_FLUSH_REGIONS, pid=54}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-16T17:57:23,430 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-1 {event_type=RS_FLUSH_REGIONS, pid=54}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241216d76a8582e0d9454aa2e4a596995dff40_7212a7dec92fa5781081695b56d809ad is 50, key is test_row_0/A:col10/1734371842031/Put/seqid=0 2024-12-16T17:57:23,433 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41817 is added to blk_1073742063_1239 (size=12454) 2024-12-16T17:57:23,774 INFO [RS:0;3609ad07831c:39733-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 7212a7dec92fa5781081695b56d809ad#A#compaction#196 average throughput is 0.06 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-12-16T17:57:23,774 DEBUG [RS:0;3609ad07831c:39733-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/7212a7dec92fa5781081695b56d809ad/.tmp/A/df0fba99142b4dda9d2c1954d48466e8 is 175, key is test_row_0/A:col10/1734371841410/Put/seqid=0 2024-12-16T17:57:23,780 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41817 is added to blk_1073742064_1240 (size=32039) 2024-12-16T17:57:23,834 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-1 {event_type=RS_FLUSH_REGIONS, pid=54}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-16T17:57:23,838 INFO [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-1 {event_type=RS_FLUSH_REGIONS, pid=54}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241216d76a8582e0d9454aa2e4a596995dff40_7212a7dec92fa5781081695b56d809ad to hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241216d76a8582e0d9454aa2e4a596995dff40_7212a7dec92fa5781081695b56d809ad 2024-12-16T17:57:23,839 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-1 {event_type=RS_FLUSH_REGIONS, pid=54}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/7212a7dec92fa5781081695b56d809ad/.tmp/A/f283ddca78a74eb4b9eda82d60858428, store: [table=TestAcidGuarantees family=A region=7212a7dec92fa5781081695b56d809ad] 2024-12-16T17:57:23,840 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-1 {event_type=RS_FLUSH_REGIONS, pid=54}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/7212a7dec92fa5781081695b56d809ad/.tmp/A/f283ddca78a74eb4b9eda82d60858428 is 175, key is test_row_0/A:col10/1734371842031/Put/seqid=0 2024-12-16T17:57:23,844 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41817 is added to blk_1073742065_1241 (size=31255) 2024-12-16T17:57:23,849 INFO [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-1 {event_type=RS_FLUSH_REGIONS, pid=54}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=356, memsize=29.1 K, hasBloomFilter=true, into tmp file hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/7212a7dec92fa5781081695b56d809ad/.tmp/A/f283ddca78a74eb4b9eda82d60858428 2024-12-16T17:57:23,855 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-1 {event_type=RS_FLUSH_REGIONS, pid=54}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/7212a7dec92fa5781081695b56d809ad/.tmp/B/70c573e0eb7c497db61e61e9cda672ae is 50, key is test_row_0/B:col10/1734371842031/Put/seqid=0 2024-12-16T17:57:23,858 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* 
addStoredBlock: 127.0.0.1:41817 is added to blk_1073742066_1242 (size=12301) 2024-12-16T17:57:24,148 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38367 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=53 2024-12-16T17:57:24,151 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39733 {}] regionserver.HRegion(8581): Flush requested on 7212a7dec92fa5781081695b56d809ad 2024-12-16T17:57:24,151 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1734371821088.7212a7dec92fa5781081695b56d809ad. as already flushing 2024-12-16T17:57:24,170 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39733 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7212a7dec92fa5781081695b56d809ad, server=3609ad07831c,39733,1734371789085 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-16T17:57:24,170 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39733 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7212a7dec92fa5781081695b56d809ad, server=3609ad07831c,39733,1734371789085 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-16T17:57:24,170 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39733 {}] ipc.CallRunner(138): callId: 187 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41642 deadline: 1734371904166, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7212a7dec92fa5781081695b56d809ad, server=3609ad07831c,39733,1734371789085 2024-12-16T17:57:24,170 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39733 {}] ipc.CallRunner(138): callId: 177 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41652 deadline: 1734371904166, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7212a7dec92fa5781081695b56d809ad, server=3609ad07831c,39733,1734371789085 2024-12-16T17:57:24,170 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39733 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7212a7dec92fa5781081695b56d809ad, server=3609ad07831c,39733,1734371789085 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-16T17:57:24,171 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39733 {}] ipc.CallRunner(138): callId: 187 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41586 deadline: 1734371904167, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7212a7dec92fa5781081695b56d809ad, server=3609ad07831c,39733,1734371789085 2024-12-16T17:57:24,172 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39733 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7212a7dec92fa5781081695b56d809ad, server=3609ad07831c,39733,1734371789085 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-16T17:57:24,172 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39733 {}] ipc.CallRunner(138): callId: 189 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41628 deadline: 1734371904170, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7212a7dec92fa5781081695b56d809ad, server=3609ad07831c,39733,1734371789085 2024-12-16T17:57:24,172 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39733 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7212a7dec92fa5781081695b56d809ad, server=3609ad07831c,39733,1734371789085 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-16T17:57:24,172 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39733 {}] ipc.CallRunner(138): callId: 196 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41612 deadline: 1734371904170, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7212a7dec92fa5781081695b56d809ad, server=3609ad07831c,39733,1734371789085 2024-12-16T17:57:24,185 DEBUG [RS:0;3609ad07831c:39733-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/7212a7dec92fa5781081695b56d809ad/.tmp/A/df0fba99142b4dda9d2c1954d48466e8 as hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/7212a7dec92fa5781081695b56d809ad/A/df0fba99142b4dda9d2c1954d48466e8 2024-12-16T17:57:24,189 INFO [RS:0;3609ad07831c:39733-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 7212a7dec92fa5781081695b56d809ad/A of 7212a7dec92fa5781081695b56d809ad into df0fba99142b4dda9d2c1954d48466e8(size=31.3 K), total size for store is 31.3 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-12-16T17:57:24,189 DEBUG [RS:0;3609ad07831c:39733-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 7212a7dec92fa5781081695b56d809ad: 2024-12-16T17:57:24,189 INFO [RS:0;3609ad07831c:39733-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1734371821088.7212a7dec92fa5781081695b56d809ad., storeName=7212a7dec92fa5781081695b56d809ad/A, priority=13, startTime=1734371843328; duration=0sec 2024-12-16T17:57:24,189 DEBUG [RS:0;3609ad07831c:39733-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-16T17:57:24,189 DEBUG [RS:0;3609ad07831c:39733-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 7212a7dec92fa5781081695b56d809ad:A 2024-12-16T17:57:24,259 INFO [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-1 {event_type=RS_FLUSH_REGIONS, pid=54}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=29.07 KB at sequenceid=356 (bloomFilter=true), to=hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/7212a7dec92fa5781081695b56d809ad/.tmp/B/70c573e0eb7c497db61e61e9cda672ae 2024-12-16T17:57:24,267 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-1 {event_type=RS_FLUSH_REGIONS, pid=54}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/7212a7dec92fa5781081695b56d809ad/.tmp/C/bf04503173dd4f14bffedbfaddc1f3c2 is 50, key is test_row_0/C:col10/1734371842031/Put/seqid=0 2024-12-16T17:57:24,271 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41817 is added to blk_1073742067_1243 (size=12301) 2024-12-16T17:57:24,273 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39733 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7212a7dec92fa5781081695b56d809ad, server=3609ad07831c,39733,1734371789085 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-16T17:57:24,273 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39733 {}] ipc.CallRunner(138): callId: 179 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41652 deadline: 1734371904271, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7212a7dec92fa5781081695b56d809ad, server=3609ad07831c,39733,1734371789085 2024-12-16T17:57:24,273 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39733 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7212a7dec92fa5781081695b56d809ad, server=3609ad07831c,39733,1734371789085 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-16T17:57:24,273 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39733 {}] ipc.CallRunner(138): callId: 189 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41642 deadline: 1734371904271, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7212a7dec92fa5781081695b56d809ad, server=3609ad07831c,39733,1734371789085 2024-12-16T17:57:24,273 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39733 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7212a7dec92fa5781081695b56d809ad, server=3609ad07831c,39733,1734371789085 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-16T17:57:24,274 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39733 {}] ipc.CallRunner(138): callId: 189 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41586 deadline: 1734371904271, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7212a7dec92fa5781081695b56d809ad, server=3609ad07831c,39733,1734371789085 2024-12-16T17:57:24,275 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39733 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7212a7dec92fa5781081695b56d809ad, server=3609ad07831c,39733,1734371789085 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-16T17:57:24,275 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39733 {}] ipc.CallRunner(138): callId: 191 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41628 deadline: 1734371904272, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7212a7dec92fa5781081695b56d809ad, server=3609ad07831c,39733,1734371789085 2024-12-16T17:57:24,275 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39733 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7212a7dec92fa5781081695b56d809ad, server=3609ad07831c,39733,1734371789085 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-16T17:57:24,275 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39733 {}] ipc.CallRunner(138): callId: 198 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41612 deadline: 1734371904273, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7212a7dec92fa5781081695b56d809ad, server=3609ad07831c,39733,1734371789085 2024-12-16T17:57:24,394 DEBUG [Thread-708 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x191da8cb to 127.0.0.1:49190 2024-12-16T17:57:24,394 DEBUG [Thread-708 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-16T17:57:24,394 DEBUG [Thread-710 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x50021c01 to 127.0.0.1:49190 2024-12-16T17:57:24,394 DEBUG [Thread-710 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-16T17:57:24,394 DEBUG [Thread-712 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x031a73c1 to 127.0.0.1:49190 2024-12-16T17:57:24,394 DEBUG [Thread-712 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-16T17:57:24,395 DEBUG [Thread-706 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x67a7fd85 to 127.0.0.1:49190 2024-12-16T17:57:24,395 DEBUG [Thread-706 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-16T17:57:24,475 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39733 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7212a7dec92fa5781081695b56d809ad, server=3609ad07831c,39733,1734371789085 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-16T17:57:24,476 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39733 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7212a7dec92fa5781081695b56d809ad, server=3609ad07831c,39733,1734371789085 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-16T17:57:24,476 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39733 {}] ipc.CallRunner(138): callId: 191 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41586 deadline: 1734371904475, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7212a7dec92fa5781081695b56d809ad, server=3609ad07831c,39733,1734371789085 2024-12-16T17:57:24,476 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39733 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7212a7dec92fa5781081695b56d809ad, server=3609ad07831c,39733,1734371789085 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-16T17:57:24,476 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39733 {}] ipc.CallRunner(138): callId: 181 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41652 deadline: 1734371904475, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7212a7dec92fa5781081695b56d809ad, server=3609ad07831c,39733,1734371789085 2024-12-16T17:57:24,476 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39733 {}] ipc.CallRunner(138): callId: 191 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41642 deadline: 1734371904476, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7212a7dec92fa5781081695b56d809ad, server=3609ad07831c,39733,1734371789085 2024-12-16T17:57:24,477 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39733 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7212a7dec92fa5781081695b56d809ad, server=3609ad07831c,39733,1734371789085 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-16T17:57:24,478 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39733 {}] ipc.CallRunner(138): callId: 193 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41628 deadline: 1734371904477, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7212a7dec92fa5781081695b56d809ad, server=3609ad07831c,39733,1734371789085 2024-12-16T17:57:24,478 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39733 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7212a7dec92fa5781081695b56d809ad, server=3609ad07831c,39733,1734371789085 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-16T17:57:24,478 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39733 {}] ipc.CallRunner(138): callId: 200 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41612 deadline: 1734371904478, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7212a7dec92fa5781081695b56d809ad, server=3609ad07831c,39733,1734371789085 2024-12-16T17:57:24,672 INFO [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-1 {event_type=RS_FLUSH_REGIONS, pid=54}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=29.07 KB at sequenceid=356 (bloomFilter=true), to=hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/7212a7dec92fa5781081695b56d809ad/.tmp/C/bf04503173dd4f14bffedbfaddc1f3c2 2024-12-16T17:57:24,677 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-1 {event_type=RS_FLUSH_REGIONS, pid=54}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/7212a7dec92fa5781081695b56d809ad/.tmp/A/f283ddca78a74eb4b9eda82d60858428 as hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/7212a7dec92fa5781081695b56d809ad/A/f283ddca78a74eb4b9eda82d60858428 2024-12-16T17:57:24,682 INFO [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-1 {event_type=RS_FLUSH_REGIONS, pid=54}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/7212a7dec92fa5781081695b56d809ad/A/f283ddca78a74eb4b9eda82d60858428, entries=150, sequenceid=356, filesize=30.5 K 2024-12-16T17:57:24,683 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-1 {event_type=RS_FLUSH_REGIONS, pid=54}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/7212a7dec92fa5781081695b56d809ad/.tmp/B/70c573e0eb7c497db61e61e9cda672ae as 
hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/7212a7dec92fa5781081695b56d809ad/B/70c573e0eb7c497db61e61e9cda672ae 2024-12-16T17:57:24,686 INFO [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-1 {event_type=RS_FLUSH_REGIONS, pid=54}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/7212a7dec92fa5781081695b56d809ad/B/70c573e0eb7c497db61e61e9cda672ae, entries=150, sequenceid=356, filesize=12.0 K 2024-12-16T17:57:24,687 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-1 {event_type=RS_FLUSH_REGIONS, pid=54}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/7212a7dec92fa5781081695b56d809ad/.tmp/C/bf04503173dd4f14bffedbfaddc1f3c2 as hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/7212a7dec92fa5781081695b56d809ad/C/bf04503173dd4f14bffedbfaddc1f3c2 2024-12-16T17:57:24,691 INFO [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-1 {event_type=RS_FLUSH_REGIONS, pid=54}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/7212a7dec92fa5781081695b56d809ad/C/bf04503173dd4f14bffedbfaddc1f3c2, entries=150, sequenceid=356, filesize=12.0 K 2024-12-16T17:57:24,692 INFO [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-1 {event_type=RS_FLUSH_REGIONS, pid=54}] regionserver.HRegion(3040): Finished flush of dataSize ~87.22 KB/89310, heapSize ~229.22 KB/234720, currentSize=120.76 KB/123660 for 7212a7dec92fa5781081695b56d809ad in 1269ms, sequenceid=356, compaction requested=false 2024-12-16T17:57:24,693 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-1 {event_type=RS_FLUSH_REGIONS, pid=54}] regionserver.HRegion(2538): Flush status journal for 7212a7dec92fa5781081695b56d809ad: 2024-12-16T17:57:24,693 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-1 {event_type=RS_FLUSH_REGIONS, pid=54}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1734371821088.7212a7dec92fa5781081695b56d809ad. 
2024-12-16T17:57:24,693 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-1 {event_type=RS_FLUSH_REGIONS, pid=54}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=54 2024-12-16T17:57:24,693 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38367 {}] master.HMaster(4106): Remote procedure done, pid=54 2024-12-16T17:57:24,695 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=54, resume processing ppid=53 2024-12-16T17:57:24,696 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=54, ppid=53, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 2.6490 sec 2024-12-16T17:57:24,697 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=53, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=53, table=TestAcidGuarantees in 2.6530 sec 2024-12-16T17:57:24,781 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39733 {}] regionserver.HRegion(8581): Flush requested on 7212a7dec92fa5781081695b56d809ad 2024-12-16T17:57:24,781 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 7212a7dec92fa5781081695b56d809ad 3/3 column families, dataSize=134.18 KB heapSize=352.31 KB 2024-12-16T17:57:24,781 DEBUG [Thread-703 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x582ce1c8 to 127.0.0.1:49190 2024-12-16T17:57:24,781 DEBUG [Thread-697 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x590870d6 to 127.0.0.1:49190 2024-12-16T17:57:24,781 DEBUG [Thread-697 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-16T17:57:24,781 DEBUG [Thread-703 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-16T17:57:24,782 DEBUG [Thread-695 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x7dd223a4 to 127.0.0.1:49190 2024-12-16T17:57:24,782 DEBUG [Thread-695 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-16T17:57:24,784 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 7212a7dec92fa5781081695b56d809ad, store=A 2024-12-16T17:57:24,784 DEBUG [Thread-701 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x75e5b8c2 to 127.0.0.1:49190 2024-12-16T17:57:24,784 DEBUG [Thread-701 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-16T17:57:24,784 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-16T17:57:24,784 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 7212a7dec92fa5781081695b56d809ad, store=B 2024-12-16T17:57:24,784 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-16T17:57:24,784 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 7212a7dec92fa5781081695b56d809ad, store=C 2024-12-16T17:57:24,784 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-16T17:57:24,785 DEBUG [Thread-699 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x2058489b to 127.0.0.1:49190 2024-12-16T17:57:24,785 DEBUG [Thread-699 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-16T17:57:24,790 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in 
hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202412165c6b21ec322e40d39f6a6bf9769edc7e_7212a7dec92fa5781081695b56d809ad is 50, key is test_row_0/A:col10/1734371844169/Put/seqid=0 2024-12-16T17:57:24,793 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41817 is added to blk_1073742068_1244 (size=12454) 2024-12-16T17:57:25,194 DEBUG [MemStoreFlusher.0 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-16T17:57:25,199 INFO [MemStoreFlusher.0 {}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202412165c6b21ec322e40d39f6a6bf9769edc7e_7212a7dec92fa5781081695b56d809ad to hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202412165c6b21ec322e40d39f6a6bf9769edc7e_7212a7dec92fa5781081695b56d809ad 2024-12-16T17:57:25,201 DEBUG [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/7212a7dec92fa5781081695b56d809ad/.tmp/A/462ae9fd449047e98cb42e99f27743c5, store: [table=TestAcidGuarantees family=A region=7212a7dec92fa5781081695b56d809ad] 2024-12-16T17:57:25,202 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/7212a7dec92fa5781081695b56d809ad/.tmp/A/462ae9fd449047e98cb42e99f27743c5 is 175, key is test_row_0/A:col10/1734371844169/Put/seqid=0 2024-12-16T17:57:25,207 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41817 is added to blk_1073742069_1245 (size=31255) 2024-12-16T17:57:25,608 INFO [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=382, memsize=49.2 K, hasBloomFilter=true, into tmp file hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/7212a7dec92fa5781081695b56d809ad/.tmp/A/462ae9fd449047e98cb42e99f27743c5 2024-12-16T17:57:25,622 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/7212a7dec92fa5781081695b56d809ad/.tmp/B/870111d09468422db0cab8f2e56ada62 is 50, key is test_row_0/B:col10/1734371844169/Put/seqid=0 2024-12-16T17:57:25,625 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41817 is added to blk_1073742070_1246 (size=12301) 2024-12-16T17:57:26,026 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=49.20 KB at sequenceid=382 (bloomFilter=true), to=hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/7212a7dec92fa5781081695b56d809ad/.tmp/B/870111d09468422db0cab8f2e56ada62 2024-12-16T17:57:26,034 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in 
hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/7212a7dec92fa5781081695b56d809ad/.tmp/C/3c3555f4098341e5b2da32351e53fc1c is 50, key is test_row_0/C:col10/1734371844169/Put/seqid=0 2024-12-16T17:57:26,038 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41817 is added to blk_1073742071_1247 (size=12301) 2024-12-16T17:57:26,149 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38367 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=53 2024-12-16T17:57:26,149 INFO [Thread-705 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 53 completed 2024-12-16T17:57:26,150 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(392): Finished test. Writers: 2024-12-16T17:57:26,150 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(394): wrote 50 2024-12-16T17:57:26,150 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(394): wrote 60 2024-12-16T17:57:26,150 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(394): wrote 62 2024-12-16T17:57:26,150 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(394): wrote 69 2024-12-16T17:57:26,150 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(394): wrote 60 2024-12-16T17:57:26,150 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(396): Readers: 2024-12-16T17:57:26,150 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(398): read 7405 2024-12-16T17:57:26,150 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(398): read 7189 2024-12-16T17:57:26,150 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(400): Scanners: 2024-12-16T17:57:26,150 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(402): scanned 3057 2024-12-16T17:57:26,150 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(403): verified 9170 rows 2024-12-16T17:57:26,150 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(402): scanned 3064 2024-12-16T17:57:26,150 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(403): verified 9192 rows 2024-12-16T17:57:26,150 INFO [Time-limited test {}] client.ConnectionImplementation(2127): Closing master protocol: MasterService 2024-12-16T17:57:26,150 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x232d9608 to 127.0.0.1:49190 2024-12-16T17:57:26,150 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-16T17:57:26,155 INFO [Time-limited test {}] client.HBaseAdmin$18(967): Started disable of TestAcidGuarantees 2024-12-16T17:57:26,156 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38367 {}] master.HMaster$13(2755): Client=jenkins//172.17.0.2 disable TestAcidGuarantees 2024-12-16T17:57:26,157 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38367 {}] procedure2.ProcedureExecutor(1098): Stored pid=55, state=RUNNABLE:DISABLE_TABLE_PREPARE; DisableTableProcedure table=TestAcidGuarantees 2024-12-16T17:57:26,159 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38367 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=55 2024-12-16T17:57:26,160 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":1,"row":"TestAcidGuarantees","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1734371846159"}]},"ts":"1734371846159"} 2024-12-16T17:57:26,161 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(1655): Updated tableName=TestAcidGuarantees, 
state=DISABLING in hbase:meta 2024-12-16T17:57:26,256 INFO [PEWorker-3 {}] procedure.DisableTableProcedure(284): Set TestAcidGuarantees to state=DISABLING 2024-12-16T17:57:26,257 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=56, ppid=55, state=RUNNABLE:CLOSE_TABLE_REGIONS_SCHEDULE; CloseTableRegionsProcedure table=TestAcidGuarantees}] 2024-12-16T17:57:26,258 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=57, ppid=56, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE; TransitRegionStateProcedure table=TestAcidGuarantees, region=7212a7dec92fa5781081695b56d809ad, UNASSIGN}] 2024-12-16T17:57:26,259 INFO [PEWorker-1 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=57, ppid=56, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE; TransitRegionStateProcedure table=TestAcidGuarantees, region=7212a7dec92fa5781081695b56d809ad, UNASSIGN 2024-12-16T17:57:26,260 INFO [PEWorker-1 {}] assignment.RegionStateStore(202): pid=57 updating hbase:meta row=7212a7dec92fa5781081695b56d809ad, regionState=CLOSING, regionLocation=3609ad07831c,39733,1734371789085 2024-12-16T17:57:26,260 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38367 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=55 2024-12-16T17:57:26,263 DEBUG [PEWorker-1 {}] assignment.TransitRegionStateProcedure(338): Close region: isSplit: false: evictOnSplit: true: evictOnClose: false 2024-12-16T17:57:26,263 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=58, ppid=57, state=RUNNABLE; CloseRegionProcedure 7212a7dec92fa5781081695b56d809ad, server=3609ad07831c,39733,1734371789085}] 2024-12-16T17:57:26,415 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 3609ad07831c,39733,1734371789085 2024-12-16T17:57:26,415 INFO [RS_CLOSE_REGION-regionserver/3609ad07831c:0-0 {event_type=M_RS_CLOSE_REGION, pid=58}] handler.UnassignRegionHandler(124): Close 7212a7dec92fa5781081695b56d809ad 2024-12-16T17:57:26,415 DEBUG [RS_CLOSE_REGION-regionserver/3609ad07831c:0-0 {event_type=M_RS_CLOSE_REGION, pid=58}] handler.UnassignRegionHandler(138): Unassign region: split region: false: evictCache: false 2024-12-16T17:57:26,415 DEBUG [RS_CLOSE_REGION-regionserver/3609ad07831c:0-0 {event_type=M_RS_CLOSE_REGION, pid=58}] regionserver.HRegion(1681): Closing 7212a7dec92fa5781081695b56d809ad, disabling compactions & flushes 2024-12-16T17:57:26,415 DEBUG [RS_CLOSE_REGION-regionserver/3609ad07831c:0-0 {event_type=M_RS_CLOSE_REGION, pid=58}] regionserver.HRegion(1942): waiting for 0 compactions & cache flush to complete for region TestAcidGuarantees,,1734371821088.7212a7dec92fa5781081695b56d809ad. 
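Note: the flush that finishes above as pid=53 (FlushTableProcedure on TestAcidGuarantees) and the disable that is queued as pid=55 (DisableTableProcedure) are both driven by ordinary client-side Admin calls. The following is a minimal illustrative sketch of those calls, not code from this test run; the class name, connection handling, and error handling are assumptions, and only the table name is taken from the log.

    // Illustrative sketch: issue the same operations the log records
    // (flush of TestAcidGuarantees, then disable) via the public Admin API.
    import java.io.IOException;
    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;

    public class FlushThenDisable {
      public static void main(String[] args) throws IOException {
        // Picks up hbase-site.xml from the classpath.
        Configuration conf = HBaseConfiguration.create();
        try (Connection conn = ConnectionFactory.createConnection(conf);
             Admin admin = conn.getAdmin()) {
          TableName table = TableName.valueOf("TestAcidGuarantees");
          admin.flush(table);        // master schedules a FlushTableProcedure (pid=53 above)
          admin.disableTable(table); // master schedules a DisableTableProcedure (pid=55 above)
        }
      }
    }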
2024-12-16T17:57:26,439 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=49.20 KB at sequenceid=382 (bloomFilter=true), to=hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/7212a7dec92fa5781081695b56d809ad/.tmp/C/3c3555f4098341e5b2da32351e53fc1c 2024-12-16T17:57:26,445 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/7212a7dec92fa5781081695b56d809ad/.tmp/A/462ae9fd449047e98cb42e99f27743c5 as hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/7212a7dec92fa5781081695b56d809ad/A/462ae9fd449047e98cb42e99f27743c5 2024-12-16T17:57:26,451 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/7212a7dec92fa5781081695b56d809ad/A/462ae9fd449047e98cb42e99f27743c5, entries=150, sequenceid=382, filesize=30.5 K 2024-12-16T17:57:26,453 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/7212a7dec92fa5781081695b56d809ad/.tmp/B/870111d09468422db0cab8f2e56ada62 as hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/7212a7dec92fa5781081695b56d809ad/B/870111d09468422db0cab8f2e56ada62 2024-12-16T17:57:26,458 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/7212a7dec92fa5781081695b56d809ad/B/870111d09468422db0cab8f2e56ada62, entries=150, sequenceid=382, filesize=12.0 K 2024-12-16T17:57:26,459 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/7212a7dec92fa5781081695b56d809ad/.tmp/C/3c3555f4098341e5b2da32351e53fc1c as hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/7212a7dec92fa5781081695b56d809ad/C/3c3555f4098341e5b2da32351e53fc1c 2024-12-16T17:57:26,461 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38367 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=55 2024-12-16T17:57:26,464 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/7212a7dec92fa5781081695b56d809ad/C/3c3555f4098341e5b2da32351e53fc1c, entries=150, sequenceid=382, filesize=12.0 K 2024-12-16T17:57:26,465 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~147.60 KB/151140, heapSize ~387.42 KB/396720, currentSize=6.71 KB/6870 for 7212a7dec92fa5781081695b56d809ad in 1684ms, sequenceid=382, compaction requested=true 2024-12-16T17:57:26,465 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 7212a7dec92fa5781081695b56d809ad: 2024-12-16T17:57:26,465 INFO [RS_CLOSE_REGION-regionserver/3609ad07831c:0-0 {event_type=M_RS_CLOSE_REGION, pid=58}] regionserver.HRegion(1703): Closing region 
TestAcidGuarantees,,1734371821088.7212a7dec92fa5781081695b56d809ad. 2024-12-16T17:57:26,465 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 7212a7dec92fa5781081695b56d809ad:A, priority=-2147483648, current under compaction store size is 1 2024-12-16T17:57:26,465 DEBUG [RS:0;3609ad07831c:39733-shortCompactions-0 {}] regionserver.CompactSplit(450): Not compacting TestAcidGuarantees,,1734371821088.7212a7dec92fa5781081695b56d809ad. because compaction request was cancelled 2024-12-16T17:57:26,465 DEBUG [RS_CLOSE_REGION-regionserver/3609ad07831c:0-0 {event_type=M_RS_CLOSE_REGION, pid=58}] regionserver.HRegion(1724): Waiting without time limit for close lock on TestAcidGuarantees,,1734371821088.7212a7dec92fa5781081695b56d809ad. 2024-12-16T17:57:26,465 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-16T17:57:26,465 DEBUG [RS:0;3609ad07831c:39733-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 7212a7dec92fa5781081695b56d809ad:A 2024-12-16T17:57:26,465 DEBUG [RS_CLOSE_REGION-regionserver/3609ad07831c:0-0 {event_type=M_RS_CLOSE_REGION, pid=58}] regionserver.HRegion(1791): Acquired close lock on TestAcidGuarantees,,1734371821088.7212a7dec92fa5781081695b56d809ad. after waiting 0 ms 2024-12-16T17:57:26,465 DEBUG [RS:0;3609ad07831c:39733-longCompactions-0 {}] regionserver.CompactSplit(450): Not compacting TestAcidGuarantees,,1734371821088.7212a7dec92fa5781081695b56d809ad. because compaction request was cancelled 2024-12-16T17:57:26,465 DEBUG [RS_CLOSE_REGION-regionserver/3609ad07831c:0-0 {event_type=M_RS_CLOSE_REGION, pid=58}] regionserver.HRegion(1801): Updates disabled for region TestAcidGuarantees,,1734371821088.7212a7dec92fa5781081695b56d809ad. 2024-12-16T17:57:26,465 DEBUG [RS:0;3609ad07831c:39733-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 7212a7dec92fa5781081695b56d809ad:B 2024-12-16T17:57:26,465 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 7212a7dec92fa5781081695b56d809ad:B, priority=-2147483648, current under compaction store size is 0 2024-12-16T17:57:26,465 INFO [RS_CLOSE_REGION-regionserver/3609ad07831c:0-0 {event_type=M_RS_CLOSE_REGION, pid=58}] regionserver.HRegion(2837): Flushing 7212a7dec92fa5781081695b56d809ad 3/3 column families, dataSize=6.71 KB heapSize=18.33 KB 2024-12-16T17:57:26,465 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-16T17:57:26,465 DEBUG [RS:0;3609ad07831c:39733-shortCompactions-0 {}] regionserver.CompactSplit(450): Not compacting TestAcidGuarantees,,1734371821088.7212a7dec92fa5781081695b56d809ad. 
because compaction request was cancelled 2024-12-16T17:57:26,465 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 7212a7dec92fa5781081695b56d809ad:C, priority=-2147483648, current under compaction store size is 1 2024-12-16T17:57:26,465 DEBUG [RS:0;3609ad07831c:39733-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 7212a7dec92fa5781081695b56d809ad:C 2024-12-16T17:57:26,465 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-16T17:57:26,465 DEBUG [RS_CLOSE_REGION-regionserver/3609ad07831c:0-0 {event_type=M_RS_CLOSE_REGION, pid=58}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 7212a7dec92fa5781081695b56d809ad, store=A 2024-12-16T17:57:26,465 DEBUG [RS_CLOSE_REGION-regionserver/3609ad07831c:0-0 {event_type=M_RS_CLOSE_REGION, pid=58}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-16T17:57:26,465 DEBUG [RS_CLOSE_REGION-regionserver/3609ad07831c:0-0 {event_type=M_RS_CLOSE_REGION, pid=58}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 7212a7dec92fa5781081695b56d809ad, store=B 2024-12-16T17:57:26,466 DEBUG [RS_CLOSE_REGION-regionserver/3609ad07831c:0-0 {event_type=M_RS_CLOSE_REGION, pid=58}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-16T17:57:26,466 DEBUG [RS_CLOSE_REGION-regionserver/3609ad07831c:0-0 {event_type=M_RS_CLOSE_REGION, pid=58}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 7212a7dec92fa5781081695b56d809ad, store=C 2024-12-16T17:57:26,466 DEBUG [RS_CLOSE_REGION-regionserver/3609ad07831c:0-0 {event_type=M_RS_CLOSE_REGION, pid=58}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-16T17:57:26,472 DEBUG [RS_CLOSE_REGION-regionserver/3609ad07831c:0-0 {event_type=M_RS_CLOSE_REGION, pid=58}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241216a8605fcefbfc4f49ae798ddc228082a2_7212a7dec92fa5781081695b56d809ad is 50, key is test_row_2/A:col10/1734371844784/Put/seqid=0 2024-12-16T17:57:26,490 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41817 is added to blk_1073742072_1248 (size=7374) 2024-12-16T17:57:26,490 DEBUG [RS_CLOSE_REGION-regionserver/3609ad07831c:0-0 {event_type=M_RS_CLOSE_REGION, pid=58}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-16T17:57:26,494 INFO [RS_CLOSE_REGION-regionserver/3609ad07831c:0-0 {event_type=M_RS_CLOSE_REGION, pid=58}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241216a8605fcefbfc4f49ae798ddc228082a2_7212a7dec92fa5781081695b56d809ad to hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241216a8605fcefbfc4f49ae798ddc228082a2_7212a7dec92fa5781081695b56d809ad 2024-12-16T17:57:26,495 DEBUG [RS_CLOSE_REGION-regionserver/3609ad07831c:0-0 
{event_type=M_RS_CLOSE_REGION, pid=58}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/7212a7dec92fa5781081695b56d809ad/.tmp/A/5dae3698eee54ee18b3bc5817237539a, store: [table=TestAcidGuarantees family=A region=7212a7dec92fa5781081695b56d809ad] 2024-12-16T17:57:26,496 DEBUG [RS_CLOSE_REGION-regionserver/3609ad07831c:0-0 {event_type=M_RS_CLOSE_REGION, pid=58}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/7212a7dec92fa5781081695b56d809ad/.tmp/A/5dae3698eee54ee18b3bc5817237539a is 175, key is test_row_2/A:col10/1734371844784/Put/seqid=0 2024-12-16T17:57:26,500 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41817 is added to blk_1073742073_1249 (size=13865) 2024-12-16T17:57:26,762 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38367 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=55 2024-12-16T17:57:26,901 INFO [RS_CLOSE_REGION-regionserver/3609ad07831c:0-0 {event_type=M_RS_CLOSE_REGION, pid=58}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=386, memsize=2.2 K, hasBloomFilter=true, into tmp file hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/7212a7dec92fa5781081695b56d809ad/.tmp/A/5dae3698eee54ee18b3bc5817237539a 2024-12-16T17:57:26,909 DEBUG [RS_CLOSE_REGION-regionserver/3609ad07831c:0-0 {event_type=M_RS_CLOSE_REGION, pid=58}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/7212a7dec92fa5781081695b56d809ad/.tmp/B/f492b432674647ccbd2c48211b287158 is 50, key is test_row_2/B:col10/1734371844784/Put/seqid=0 2024-12-16T17:57:26,914 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41817 is added to blk_1073742074_1250 (size=7415) 2024-12-16T17:57:27,262 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38367 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=55 2024-12-16T17:57:27,322 INFO [RS_CLOSE_REGION-regionserver/3609ad07831c:0-0 {event_type=M_RS_CLOSE_REGION, pid=58}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=2.24 KB at sequenceid=386 (bloomFilter=true), to=hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/7212a7dec92fa5781081695b56d809ad/.tmp/B/f492b432674647ccbd2c48211b287158 2024-12-16T17:57:27,332 DEBUG [RS_CLOSE_REGION-regionserver/3609ad07831c:0-0 {event_type=M_RS_CLOSE_REGION, pid=58}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/7212a7dec92fa5781081695b56d809ad/.tmp/C/6608f4f6f9444a8b813ae5930674f24a is 50, key is test_row_2/C:col10/1734371844784/Put/seqid=0 2024-12-16T17:57:27,342 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41817 is added to blk_1073742075_1251 (size=7415) 2024-12-16T17:57:27,344 INFO [RS_CLOSE_REGION-regionserver/3609ad07831c:0-0 {event_type=M_RS_CLOSE_REGION, pid=58}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=2.24 KB at sequenceid=386 
(bloomFilter=true), to=hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/7212a7dec92fa5781081695b56d809ad/.tmp/C/6608f4f6f9444a8b813ae5930674f24a 2024-12-16T17:57:27,349 DEBUG [RS_CLOSE_REGION-regionserver/3609ad07831c:0-0 {event_type=M_RS_CLOSE_REGION, pid=58}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/7212a7dec92fa5781081695b56d809ad/.tmp/A/5dae3698eee54ee18b3bc5817237539a as hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/7212a7dec92fa5781081695b56d809ad/A/5dae3698eee54ee18b3bc5817237539a 2024-12-16T17:57:27,356 INFO [RS_CLOSE_REGION-regionserver/3609ad07831c:0-0 {event_type=M_RS_CLOSE_REGION, pid=58}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/7212a7dec92fa5781081695b56d809ad/A/5dae3698eee54ee18b3bc5817237539a, entries=50, sequenceid=386, filesize=13.5 K 2024-12-16T17:57:27,357 DEBUG [RS_CLOSE_REGION-regionserver/3609ad07831c:0-0 {event_type=M_RS_CLOSE_REGION, pid=58}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/7212a7dec92fa5781081695b56d809ad/.tmp/B/f492b432674647ccbd2c48211b287158 as hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/7212a7dec92fa5781081695b56d809ad/B/f492b432674647ccbd2c48211b287158 2024-12-16T17:57:27,363 INFO [RS_CLOSE_REGION-regionserver/3609ad07831c:0-0 {event_type=M_RS_CLOSE_REGION, pid=58}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/7212a7dec92fa5781081695b56d809ad/B/f492b432674647ccbd2c48211b287158, entries=50, sequenceid=386, filesize=7.2 K 2024-12-16T17:57:27,364 DEBUG [RS_CLOSE_REGION-regionserver/3609ad07831c:0-0 {event_type=M_RS_CLOSE_REGION, pid=58}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/7212a7dec92fa5781081695b56d809ad/.tmp/C/6608f4f6f9444a8b813ae5930674f24a as hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/7212a7dec92fa5781081695b56d809ad/C/6608f4f6f9444a8b813ae5930674f24a 2024-12-16T17:57:27,370 INFO [RS_CLOSE_REGION-regionserver/3609ad07831c:0-0 {event_type=M_RS_CLOSE_REGION, pid=58}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/7212a7dec92fa5781081695b56d809ad/C/6608f4f6f9444a8b813ae5930674f24a, entries=50, sequenceid=386, filesize=7.2 K 2024-12-16T17:57:27,371 INFO [RS_CLOSE_REGION-regionserver/3609ad07831c:0-0 {event_type=M_RS_CLOSE_REGION, pid=58}] regionserver.HRegion(3040): Finished flush of dataSize ~6.71 KB/6870, heapSize ~18.28 KB/18720, currentSize=0 B/0 for 7212a7dec92fa5781081695b56d809ad in 906ms, sequenceid=386, compaction requested=true 2024-12-16T17:57:27,372 DEBUG [StoreCloser-TestAcidGuarantees,,1734371821088.7212a7dec92fa5781081695b56d809ad.-1 {}] regionserver.HStore(2316): Moving the files 
[hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/7212a7dec92fa5781081695b56d809ad/A/e7a2c69b873d41a8a8ecdd9801ba0d17, hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/7212a7dec92fa5781081695b56d809ad/A/dcc91abd2eeb42f6a5223b33e78b2eac, hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/7212a7dec92fa5781081695b56d809ad/A/8382e2b8d4f64b768f197bcaf208a283, hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/7212a7dec92fa5781081695b56d809ad/A/373c3802fb174c3f964f48d4def4af6f, hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/7212a7dec92fa5781081695b56d809ad/A/f67b9ee5e63546dbba0bb290afc45d7c, hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/7212a7dec92fa5781081695b56d809ad/A/31fc5bdb99ac439ca2196ede353c8165, hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/7212a7dec92fa5781081695b56d809ad/A/6e4bd1593f7e4a5aa3d044ed97abfa43, hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/7212a7dec92fa5781081695b56d809ad/A/f0459809cdf14565a7b91b7cf26dbf34, hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/7212a7dec92fa5781081695b56d809ad/A/dab6500209b4499cbfcc2a9c00c43941, hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/7212a7dec92fa5781081695b56d809ad/A/2fb859d67a9d4a89b091c05ad4da0a5d, hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/7212a7dec92fa5781081695b56d809ad/A/cf4111b4a3cf4b11a022e238d987d481, hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/7212a7dec92fa5781081695b56d809ad/A/ebcf8b6d96054995b0634e7192da808e, hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/7212a7dec92fa5781081695b56d809ad/A/93958fdf6d434341a6b645a85f56509f, hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/7212a7dec92fa5781081695b56d809ad/A/ba02e84dc04245adb2c3df3c4b69e98a, hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/7212a7dec92fa5781081695b56d809ad/A/bd5a3a1af6294e16b24e0c88ad4711b8, hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/7212a7dec92fa5781081695b56d809ad/A/a69582b913cf4020b2bef655d64fe16f, hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/7212a7dec92fa5781081695b56d809ad/A/519028bdec9445bd8dfb12c201548b5a, hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/7212a7dec92fa5781081695b56d809ad/A/028e1fc0ff264d78b92019720337eda2, hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/7212a7dec92fa5781081695b56d809ad/A/1055fd24838e4cbe92110aa003f8d07b, 
hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/7212a7dec92fa5781081695b56d809ad/A/e7264d83d5c94bc68e56beda8deb1876, hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/7212a7dec92fa5781081695b56d809ad/A/2b694b49804a48d78f0794a877afaf27, hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/7212a7dec92fa5781081695b56d809ad/A/a5bf7b4ed79e4dd8ae3634df2a20c011, hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/7212a7dec92fa5781081695b56d809ad/A/ba3c02dba81a4260a9930a26606e3f2b] to archive 2024-12-16T17:57:27,374 DEBUG [StoreCloser-TestAcidGuarantees,,1734371821088.7212a7dec92fa5781081695b56d809ad.-1 {}] backup.HFileArchiver(363): Archiving compacted files. 2024-12-16T17:57:27,378 DEBUG [HFileArchiver-2 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/7212a7dec92fa5781081695b56d809ad/A/373c3802fb174c3f964f48d4def4af6f to hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/archive/data/default/TestAcidGuarantees/7212a7dec92fa5781081695b56d809ad/A/373c3802fb174c3f964f48d4def4af6f 2024-12-16T17:57:27,378 DEBUG [HFileArchiver-4 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/7212a7dec92fa5781081695b56d809ad/A/6e4bd1593f7e4a5aa3d044ed97abfa43 to hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/archive/data/default/TestAcidGuarantees/7212a7dec92fa5781081695b56d809ad/A/6e4bd1593f7e4a5aa3d044ed97abfa43 2024-12-16T17:57:27,378 DEBUG [HFileArchiver-6 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/7212a7dec92fa5781081695b56d809ad/A/8382e2b8d4f64b768f197bcaf208a283 to hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/archive/data/default/TestAcidGuarantees/7212a7dec92fa5781081695b56d809ad/A/8382e2b8d4f64b768f197bcaf208a283 2024-12-16T17:57:27,378 DEBUG [HFileArchiver-1 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/7212a7dec92fa5781081695b56d809ad/A/31fc5bdb99ac439ca2196ede353c8165 to hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/archive/data/default/TestAcidGuarantees/7212a7dec92fa5781081695b56d809ad/A/31fc5bdb99ac439ca2196ede353c8165 2024-12-16T17:57:27,378 DEBUG [HFileArchiver-7 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/7212a7dec92fa5781081695b56d809ad/A/e7a2c69b873d41a8a8ecdd9801ba0d17 to hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/archive/data/default/TestAcidGuarantees/7212a7dec92fa5781081695b56d809ad/A/e7a2c69b873d41a8a8ecdd9801ba0d17 2024-12-16T17:57:27,378 DEBUG [HFileArchiver-5 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, 
hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/7212a7dec92fa5781081695b56d809ad/A/f0459809cdf14565a7b91b7cf26dbf34 to hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/archive/data/default/TestAcidGuarantees/7212a7dec92fa5781081695b56d809ad/A/f0459809cdf14565a7b91b7cf26dbf34 2024-12-16T17:57:27,379 DEBUG [HFileArchiver-3 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/7212a7dec92fa5781081695b56d809ad/A/dcc91abd2eeb42f6a5223b33e78b2eac to hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/archive/data/default/TestAcidGuarantees/7212a7dec92fa5781081695b56d809ad/A/dcc91abd2eeb42f6a5223b33e78b2eac 2024-12-16T17:57:27,379 DEBUG [HFileArchiver-8 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/7212a7dec92fa5781081695b56d809ad/A/f67b9ee5e63546dbba0bb290afc45d7c to hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/archive/data/default/TestAcidGuarantees/7212a7dec92fa5781081695b56d809ad/A/f67b9ee5e63546dbba0bb290afc45d7c 2024-12-16T17:57:27,381 DEBUG [HFileArchiver-2 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/7212a7dec92fa5781081695b56d809ad/A/dab6500209b4499cbfcc2a9c00c43941 to hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/archive/data/default/TestAcidGuarantees/7212a7dec92fa5781081695b56d809ad/A/dab6500209b4499cbfcc2a9c00c43941 2024-12-16T17:57:27,381 DEBUG [HFileArchiver-4 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/7212a7dec92fa5781081695b56d809ad/A/2fb859d67a9d4a89b091c05ad4da0a5d to hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/archive/data/default/TestAcidGuarantees/7212a7dec92fa5781081695b56d809ad/A/2fb859d67a9d4a89b091c05ad4da0a5d 2024-12-16T17:57:27,382 DEBUG [HFileArchiver-6 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/7212a7dec92fa5781081695b56d809ad/A/cf4111b4a3cf4b11a022e238d987d481 to hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/archive/data/default/TestAcidGuarantees/7212a7dec92fa5781081695b56d809ad/A/cf4111b4a3cf4b11a022e238d987d481 2024-12-16T17:57:27,383 DEBUG [HFileArchiver-5 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/7212a7dec92fa5781081695b56d809ad/A/ba02e84dc04245adb2c3df3c4b69e98a to hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/archive/data/default/TestAcidGuarantees/7212a7dec92fa5781081695b56d809ad/A/ba02e84dc04245adb2c3df3c4b69e98a 2024-12-16T17:57:27,383 DEBUG [HFileArchiver-1 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, 
hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/7212a7dec92fa5781081695b56d809ad/A/ebcf8b6d96054995b0634e7192da808e to hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/archive/data/default/TestAcidGuarantees/7212a7dec92fa5781081695b56d809ad/A/ebcf8b6d96054995b0634e7192da808e 2024-12-16T17:57:27,383 DEBUG [HFileArchiver-7 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/7212a7dec92fa5781081695b56d809ad/A/93958fdf6d434341a6b645a85f56509f to hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/archive/data/default/TestAcidGuarantees/7212a7dec92fa5781081695b56d809ad/A/93958fdf6d434341a6b645a85f56509f 2024-12-16T17:57:27,385 DEBUG [HFileArchiver-8 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/7212a7dec92fa5781081695b56d809ad/A/a69582b913cf4020b2bef655d64fe16f to hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/archive/data/default/TestAcidGuarantees/7212a7dec92fa5781081695b56d809ad/A/a69582b913cf4020b2bef655d64fe16f 2024-12-16T17:57:27,385 DEBUG [HFileArchiver-3 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/7212a7dec92fa5781081695b56d809ad/A/bd5a3a1af6294e16b24e0c88ad4711b8 to hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/archive/data/default/TestAcidGuarantees/7212a7dec92fa5781081695b56d809ad/A/bd5a3a1af6294e16b24e0c88ad4711b8 2024-12-16T17:57:27,385 DEBUG [HFileArchiver-6 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/7212a7dec92fa5781081695b56d809ad/A/1055fd24838e4cbe92110aa003f8d07b to hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/archive/data/default/TestAcidGuarantees/7212a7dec92fa5781081695b56d809ad/A/1055fd24838e4cbe92110aa003f8d07b 2024-12-16T17:57:27,386 DEBUG [HFileArchiver-2 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/7212a7dec92fa5781081695b56d809ad/A/519028bdec9445bd8dfb12c201548b5a to hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/archive/data/default/TestAcidGuarantees/7212a7dec92fa5781081695b56d809ad/A/519028bdec9445bd8dfb12c201548b5a 2024-12-16T17:57:27,386 DEBUG [HFileArchiver-1 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/7212a7dec92fa5781081695b56d809ad/A/2b694b49804a48d78f0794a877afaf27 to hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/archive/data/default/TestAcidGuarantees/7212a7dec92fa5781081695b56d809ad/A/2b694b49804a48d78f0794a877afaf27 2024-12-16T17:57:27,386 DEBUG [HFileArchiver-5 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, 
hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/7212a7dec92fa5781081695b56d809ad/A/e7264d83d5c94bc68e56beda8deb1876 to hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/archive/data/default/TestAcidGuarantees/7212a7dec92fa5781081695b56d809ad/A/e7264d83d5c94bc68e56beda8deb1876 2024-12-16T17:57:27,387 DEBUG [HFileArchiver-7 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/7212a7dec92fa5781081695b56d809ad/A/a5bf7b4ed79e4dd8ae3634df2a20c011 to hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/archive/data/default/TestAcidGuarantees/7212a7dec92fa5781081695b56d809ad/A/a5bf7b4ed79e4dd8ae3634df2a20c011 2024-12-16T17:57:27,387 DEBUG [HFileArchiver-4 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/7212a7dec92fa5781081695b56d809ad/A/028e1fc0ff264d78b92019720337eda2 to hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/archive/data/default/TestAcidGuarantees/7212a7dec92fa5781081695b56d809ad/A/028e1fc0ff264d78b92019720337eda2 2024-12-16T17:57:27,387 DEBUG [HFileArchiver-8 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/7212a7dec92fa5781081695b56d809ad/A/ba3c02dba81a4260a9930a26606e3f2b to hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/archive/data/default/TestAcidGuarantees/7212a7dec92fa5781081695b56d809ad/A/ba3c02dba81a4260a9930a26606e3f2b 2024-12-16T17:57:27,390 DEBUG [StoreCloser-TestAcidGuarantees,,1734371821088.7212a7dec92fa5781081695b56d809ad.-1 {}] regionserver.HStore(2316): Moving the files [hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/7212a7dec92fa5781081695b56d809ad/B/cde3158a9b194369a7a97a3584e33c0d, hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/7212a7dec92fa5781081695b56d809ad/B/6a52563fbfd5403f9cf1ae3902893399, hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/7212a7dec92fa5781081695b56d809ad/B/193c1325600f4b45a0ffca178f4ec64c, hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/7212a7dec92fa5781081695b56d809ad/B/ede95ed9a256403d95b8292ef99cd61a, hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/7212a7dec92fa5781081695b56d809ad/B/7cedf7ac9f484cf6aa4aed7bd21913b5, hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/7212a7dec92fa5781081695b56d809ad/B/d1d05544a6f04c359e7da403235cc9ab, hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/7212a7dec92fa5781081695b56d809ad/B/dad3023d67d24c24b0fafbe3f107dcb8, hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/7212a7dec92fa5781081695b56d809ad/B/1f2e2cd8de6d4ad8b79338878becbfe5, 
hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/7212a7dec92fa5781081695b56d809ad/B/437acd4259954da59a2bf09c87d14dea, hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/7212a7dec92fa5781081695b56d809ad/B/72e838897fc941f8a2df3ef7e6d11c7a, hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/7212a7dec92fa5781081695b56d809ad/B/cbdbe7f032e44f119ab6148bf96a84a2, hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/7212a7dec92fa5781081695b56d809ad/B/fa811f5b58e54cf592047382f86c8500, hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/7212a7dec92fa5781081695b56d809ad/B/b4f5a30590d74fc1903956c55d525b69, hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/7212a7dec92fa5781081695b56d809ad/B/2892322bc4f64337b29f7d0743eb2980, hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/7212a7dec92fa5781081695b56d809ad/B/e90482b51c3849688b30b606560170bf, hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/7212a7dec92fa5781081695b56d809ad/B/9484aeca67984bdd841c38956c7bbe19, hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/7212a7dec92fa5781081695b56d809ad/B/27377419172649f9b2ee205fef33d10e, hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/7212a7dec92fa5781081695b56d809ad/B/aa3c0c1d37e3495a9df5e9babce26c7f, hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/7212a7dec92fa5781081695b56d809ad/B/d20a5db0fe0b43ecbf20da1f3ab4f07a, hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/7212a7dec92fa5781081695b56d809ad/B/1e5884c4e699444f9a1263d02c2b0d48, hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/7212a7dec92fa5781081695b56d809ad/B/e195598ac6a54bc881d7512aa6e98405, hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/7212a7dec92fa5781081695b56d809ad/B/e70f420a9aae40de8dd536ac1f67b4e8, hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/7212a7dec92fa5781081695b56d809ad/B/6d5e4db6f4434cdc90cc3e2a2bb4bcbc] to archive 2024-12-16T17:57:27,391 DEBUG [StoreCloser-TestAcidGuarantees,,1734371821088.7212a7dec92fa5781081695b56d809ad.-1 {}] backup.HFileArchiver(363): Archiving compacted files. 
2024-12-16T17:57:27,395 DEBUG [HFileArchiver-5 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/7212a7dec92fa5781081695b56d809ad/B/7cedf7ac9f484cf6aa4aed7bd21913b5 to hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/archive/data/default/TestAcidGuarantees/7212a7dec92fa5781081695b56d809ad/B/7cedf7ac9f484cf6aa4aed7bd21913b5 2024-12-16T17:57:27,395 DEBUG [HFileArchiver-3 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/7212a7dec92fa5781081695b56d809ad/B/cde3158a9b194369a7a97a3584e33c0d to hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/archive/data/default/TestAcidGuarantees/7212a7dec92fa5781081695b56d809ad/B/cde3158a9b194369a7a97a3584e33c0d 2024-12-16T17:57:27,396 DEBUG [HFileArchiver-6 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/7212a7dec92fa5781081695b56d809ad/B/6a52563fbfd5403f9cf1ae3902893399 to hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/archive/data/default/TestAcidGuarantees/7212a7dec92fa5781081695b56d809ad/B/6a52563fbfd5403f9cf1ae3902893399 2024-12-16T17:57:27,397 DEBUG [HFileArchiver-4 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/7212a7dec92fa5781081695b56d809ad/B/dad3023d67d24c24b0fafbe3f107dcb8 to hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/archive/data/default/TestAcidGuarantees/7212a7dec92fa5781081695b56d809ad/B/dad3023d67d24c24b0fafbe3f107dcb8 2024-12-16T17:57:27,398 DEBUG [HFileArchiver-7 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/7212a7dec92fa5781081695b56d809ad/B/d1d05544a6f04c359e7da403235cc9ab to hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/archive/data/default/TestAcidGuarantees/7212a7dec92fa5781081695b56d809ad/B/d1d05544a6f04c359e7da403235cc9ab 2024-12-16T17:57:27,398 DEBUG [HFileArchiver-2 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/7212a7dec92fa5781081695b56d809ad/B/193c1325600f4b45a0ffca178f4ec64c to hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/archive/data/default/TestAcidGuarantees/7212a7dec92fa5781081695b56d809ad/B/193c1325600f4b45a0ffca178f4ec64c 2024-12-16T17:57:27,398 DEBUG [HFileArchiver-1 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/7212a7dec92fa5781081695b56d809ad/B/ede95ed9a256403d95b8292ef99cd61a to hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/archive/data/default/TestAcidGuarantees/7212a7dec92fa5781081695b56d809ad/B/ede95ed9a256403d95b8292ef99cd61a 2024-12-16T17:57:27,399 DEBUG [HFileArchiver-8 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, 
hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/7212a7dec92fa5781081695b56d809ad/B/1f2e2cd8de6d4ad8b79338878becbfe5 to hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/archive/data/default/TestAcidGuarantees/7212a7dec92fa5781081695b56d809ad/B/1f2e2cd8de6d4ad8b79338878becbfe5 2024-12-16T17:57:27,400 DEBUG [HFileArchiver-3 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/7212a7dec92fa5781081695b56d809ad/B/72e838897fc941f8a2df3ef7e6d11c7a to hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/archive/data/default/TestAcidGuarantees/7212a7dec92fa5781081695b56d809ad/B/72e838897fc941f8a2df3ef7e6d11c7a 2024-12-16T17:57:27,401 DEBUG [HFileArchiver-5 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/7212a7dec92fa5781081695b56d809ad/B/437acd4259954da59a2bf09c87d14dea to hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/archive/data/default/TestAcidGuarantees/7212a7dec92fa5781081695b56d809ad/B/437acd4259954da59a2bf09c87d14dea 2024-12-16T17:57:27,401 DEBUG [HFileArchiver-4 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/7212a7dec92fa5781081695b56d809ad/B/fa811f5b58e54cf592047382f86c8500 to hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/archive/data/default/TestAcidGuarantees/7212a7dec92fa5781081695b56d809ad/B/fa811f5b58e54cf592047382f86c8500 2024-12-16T17:57:27,401 DEBUG [HFileArchiver-7 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/7212a7dec92fa5781081695b56d809ad/B/b4f5a30590d74fc1903956c55d525b69 to hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/archive/data/default/TestAcidGuarantees/7212a7dec92fa5781081695b56d809ad/B/b4f5a30590d74fc1903956c55d525b69 2024-12-16T17:57:27,401 DEBUG [HFileArchiver-2 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/7212a7dec92fa5781081695b56d809ad/B/2892322bc4f64337b29f7d0743eb2980 to hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/archive/data/default/TestAcidGuarantees/7212a7dec92fa5781081695b56d809ad/B/2892322bc4f64337b29f7d0743eb2980 2024-12-16T17:57:27,401 DEBUG [HFileArchiver-6 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/7212a7dec92fa5781081695b56d809ad/B/cbdbe7f032e44f119ab6148bf96a84a2 to hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/archive/data/default/TestAcidGuarantees/7212a7dec92fa5781081695b56d809ad/B/cbdbe7f032e44f119ab6148bf96a84a2 2024-12-16T17:57:27,401 DEBUG [HFileArchiver-1 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, 
hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/7212a7dec92fa5781081695b56d809ad/B/e90482b51c3849688b30b606560170bf to hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/archive/data/default/TestAcidGuarantees/7212a7dec92fa5781081695b56d809ad/B/e90482b51c3849688b30b606560170bf 2024-12-16T17:57:27,403 DEBUG [HFileArchiver-8 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/7212a7dec92fa5781081695b56d809ad/B/9484aeca67984bdd841c38956c7bbe19 to hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/archive/data/default/TestAcidGuarantees/7212a7dec92fa5781081695b56d809ad/B/9484aeca67984bdd841c38956c7bbe19 2024-12-16T17:57:27,403 DEBUG [HFileArchiver-3 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/7212a7dec92fa5781081695b56d809ad/B/27377419172649f9b2ee205fef33d10e to hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/archive/data/default/TestAcidGuarantees/7212a7dec92fa5781081695b56d809ad/B/27377419172649f9b2ee205fef33d10e 2024-12-16T17:57:27,405 DEBUG [HFileArchiver-7 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/7212a7dec92fa5781081695b56d809ad/B/1e5884c4e699444f9a1263d02c2b0d48 to hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/archive/data/default/TestAcidGuarantees/7212a7dec92fa5781081695b56d809ad/B/1e5884c4e699444f9a1263d02c2b0d48 2024-12-16T17:57:27,405 DEBUG [HFileArchiver-4 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/7212a7dec92fa5781081695b56d809ad/B/d20a5db0fe0b43ecbf20da1f3ab4f07a to hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/archive/data/default/TestAcidGuarantees/7212a7dec92fa5781081695b56d809ad/B/d20a5db0fe0b43ecbf20da1f3ab4f07a 2024-12-16T17:57:27,405 DEBUG [HFileArchiver-5 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/7212a7dec92fa5781081695b56d809ad/B/aa3c0c1d37e3495a9df5e9babce26c7f to hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/archive/data/default/TestAcidGuarantees/7212a7dec92fa5781081695b56d809ad/B/aa3c0c1d37e3495a9df5e9babce26c7f 2024-12-16T17:57:27,405 DEBUG [HFileArchiver-2 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/7212a7dec92fa5781081695b56d809ad/B/e195598ac6a54bc881d7512aa6e98405 to hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/archive/data/default/TestAcidGuarantees/7212a7dec92fa5781081695b56d809ad/B/e195598ac6a54bc881d7512aa6e98405 2024-12-16T17:57:27,405 DEBUG [HFileArchiver-6 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, 
hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/7212a7dec92fa5781081695b56d809ad/B/e70f420a9aae40de8dd536ac1f67b4e8 to hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/archive/data/default/TestAcidGuarantees/7212a7dec92fa5781081695b56d809ad/B/e70f420a9aae40de8dd536ac1f67b4e8 2024-12-16T17:57:27,405 DEBUG [HFileArchiver-1 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/7212a7dec92fa5781081695b56d809ad/B/6d5e4db6f4434cdc90cc3e2a2bb4bcbc to hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/archive/data/default/TestAcidGuarantees/7212a7dec92fa5781081695b56d809ad/B/6d5e4db6f4434cdc90cc3e2a2bb4bcbc 2024-12-16T17:57:27,413 DEBUG [StoreCloser-TestAcidGuarantees,,1734371821088.7212a7dec92fa5781081695b56d809ad.-1 {}] regionserver.HStore(2316): Moving the files [hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/7212a7dec92fa5781081695b56d809ad/C/d931f50d815a4247a6a3494b887c3b1b, hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/7212a7dec92fa5781081695b56d809ad/C/4b3db31384484ed8a1ad16415a6fb13c, hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/7212a7dec92fa5781081695b56d809ad/C/a5d4761eae92409585ad222fdf5cf957, hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/7212a7dec92fa5781081695b56d809ad/C/7f51f3735b1b43e88139f4816ffcd031, hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/7212a7dec92fa5781081695b56d809ad/C/16259b4a9fa74971bd76489851d9c7f2, hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/7212a7dec92fa5781081695b56d809ad/C/0066b68d19ba49c2ab5b2ac5b7e384a7, hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/7212a7dec92fa5781081695b56d809ad/C/31b6c1416a6d46108b4256276e1536d3, hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/7212a7dec92fa5781081695b56d809ad/C/0fb1db9cfcd2468085fb2e8ad058f470, hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/7212a7dec92fa5781081695b56d809ad/C/92bb9126b28b4121a60bd85f7a66fea0, hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/7212a7dec92fa5781081695b56d809ad/C/3c82b4c6bd0d4095b3214eda58dfbf88, hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/7212a7dec92fa5781081695b56d809ad/C/d1366337e91f48eaa953f421c2de5039, hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/7212a7dec92fa5781081695b56d809ad/C/1cba54bfa6a7486d894a85c13f4598f3, hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/7212a7dec92fa5781081695b56d809ad/C/00c74d89c4934f4080f19e1b4e9aaa43, 
hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/7212a7dec92fa5781081695b56d809ad/C/e65e8ebdfc614858b13553354a4f07b7, hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/7212a7dec92fa5781081695b56d809ad/C/d1668af17bc541ce940705dd2818bd0b, hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/7212a7dec92fa5781081695b56d809ad/C/861341f7f9f44e28abdb8a5628bb5e89, hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/7212a7dec92fa5781081695b56d809ad/C/c929de87769e4ad392ffb2e31a1b6102, hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/7212a7dec92fa5781081695b56d809ad/C/652d941856074f2fbcc3a31ef89f1cdf, hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/7212a7dec92fa5781081695b56d809ad/C/b3037d6f8597426fba3b5b5e771880ed, hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/7212a7dec92fa5781081695b56d809ad/C/bae7a6d695514c2dbedcb0fc412faf2a, hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/7212a7dec92fa5781081695b56d809ad/C/ebb81563988a43939276f33d1b25559e, hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/7212a7dec92fa5781081695b56d809ad/C/ab5f11a8d0a04df98673fed99afc69ed, hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/7212a7dec92fa5781081695b56d809ad/C/795357db82374053b61402c7bde286c2] to archive 2024-12-16T17:57:27,416 DEBUG [StoreCloser-TestAcidGuarantees,,1734371821088.7212a7dec92fa5781081695b56d809ad.-1 {}] backup.HFileArchiver(363): Archiving compacted files. 
2024-12-16T17:57:27,420 DEBUG [HFileArchiver-8 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/7212a7dec92fa5781081695b56d809ad/C/4b3db31384484ed8a1ad16415a6fb13c to hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/archive/data/default/TestAcidGuarantees/7212a7dec92fa5781081695b56d809ad/C/4b3db31384484ed8a1ad16415a6fb13c 2024-12-16T17:57:27,421 DEBUG [HFileArchiver-6 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/7212a7dec92fa5781081695b56d809ad/C/31b6c1416a6d46108b4256276e1536d3 to hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/archive/data/default/TestAcidGuarantees/7212a7dec92fa5781081695b56d809ad/C/31b6c1416a6d46108b4256276e1536d3 2024-12-16T17:57:27,422 DEBUG [HFileArchiver-2 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/7212a7dec92fa5781081695b56d809ad/C/0066b68d19ba49c2ab5b2ac5b7e384a7 to hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/archive/data/default/TestAcidGuarantees/7212a7dec92fa5781081695b56d809ad/C/0066b68d19ba49c2ab5b2ac5b7e384a7 2024-12-16T17:57:27,422 DEBUG [HFileArchiver-7 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/7212a7dec92fa5781081695b56d809ad/C/16259b4a9fa74971bd76489851d9c7f2 to hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/archive/data/default/TestAcidGuarantees/7212a7dec92fa5781081695b56d809ad/C/16259b4a9fa74971bd76489851d9c7f2 2024-12-16T17:57:27,422 DEBUG [HFileArchiver-3 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/7212a7dec92fa5781081695b56d809ad/C/d931f50d815a4247a6a3494b887c3b1b to hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/archive/data/default/TestAcidGuarantees/7212a7dec92fa5781081695b56d809ad/C/d931f50d815a4247a6a3494b887c3b1b 2024-12-16T17:57:27,422 DEBUG [HFileArchiver-5 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/7212a7dec92fa5781081695b56d809ad/C/7f51f3735b1b43e88139f4816ffcd031 to hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/archive/data/default/TestAcidGuarantees/7212a7dec92fa5781081695b56d809ad/C/7f51f3735b1b43e88139f4816ffcd031 2024-12-16T17:57:27,422 DEBUG [HFileArchiver-1 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/7212a7dec92fa5781081695b56d809ad/C/0fb1db9cfcd2468085fb2e8ad058f470 to hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/archive/data/default/TestAcidGuarantees/7212a7dec92fa5781081695b56d809ad/C/0fb1db9cfcd2468085fb2e8ad058f470 2024-12-16T17:57:27,422 DEBUG [HFileArchiver-4 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, 
hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/7212a7dec92fa5781081695b56d809ad/C/a5d4761eae92409585ad222fdf5cf957 to hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/archive/data/default/TestAcidGuarantees/7212a7dec92fa5781081695b56d809ad/C/a5d4761eae92409585ad222fdf5cf957 2024-12-16T17:57:27,425 DEBUG [HFileArchiver-8 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/7212a7dec92fa5781081695b56d809ad/C/92bb9126b28b4121a60bd85f7a66fea0 to hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/archive/data/default/TestAcidGuarantees/7212a7dec92fa5781081695b56d809ad/C/92bb9126b28b4121a60bd85f7a66fea0 2024-12-16T17:57:27,425 DEBUG [HFileArchiver-6 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/7212a7dec92fa5781081695b56d809ad/C/3c82b4c6bd0d4095b3214eda58dfbf88 to hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/archive/data/default/TestAcidGuarantees/7212a7dec92fa5781081695b56d809ad/C/3c82b4c6bd0d4095b3214eda58dfbf88 2024-12-16T17:57:27,425 DEBUG [HFileArchiver-7 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/7212a7dec92fa5781081695b56d809ad/C/1cba54bfa6a7486d894a85c13f4598f3 to hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/archive/data/default/TestAcidGuarantees/7212a7dec92fa5781081695b56d809ad/C/1cba54bfa6a7486d894a85c13f4598f3 2024-12-16T17:57:27,426 DEBUG [HFileArchiver-3 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/7212a7dec92fa5781081695b56d809ad/C/00c74d89c4934f4080f19e1b4e9aaa43 to hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/archive/data/default/TestAcidGuarantees/7212a7dec92fa5781081695b56d809ad/C/00c74d89c4934f4080f19e1b4e9aaa43 2024-12-16T17:57:27,426 DEBUG [HFileArchiver-1 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/7212a7dec92fa5781081695b56d809ad/C/d1668af17bc541ce940705dd2818bd0b to hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/archive/data/default/TestAcidGuarantees/7212a7dec92fa5781081695b56d809ad/C/d1668af17bc541ce940705dd2818bd0b 2024-12-16T17:57:27,426 DEBUG [HFileArchiver-2 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/7212a7dec92fa5781081695b56d809ad/C/d1366337e91f48eaa953f421c2de5039 to hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/archive/data/default/TestAcidGuarantees/7212a7dec92fa5781081695b56d809ad/C/d1366337e91f48eaa953f421c2de5039 2024-12-16T17:57:27,428 DEBUG [HFileArchiver-5 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, 
hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/7212a7dec92fa5781081695b56d809ad/C/e65e8ebdfc614858b13553354a4f07b7 to hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/archive/data/default/TestAcidGuarantees/7212a7dec92fa5781081695b56d809ad/C/e65e8ebdfc614858b13553354a4f07b7 2024-12-16T17:57:27,428 DEBUG [HFileArchiver-4 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/7212a7dec92fa5781081695b56d809ad/C/861341f7f9f44e28abdb8a5628bb5e89 to hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/archive/data/default/TestAcidGuarantees/7212a7dec92fa5781081695b56d809ad/C/861341f7f9f44e28abdb8a5628bb5e89 2024-12-16T17:57:27,431 DEBUG [HFileArchiver-6 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/7212a7dec92fa5781081695b56d809ad/C/652d941856074f2fbcc3a31ef89f1cdf to hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/archive/data/default/TestAcidGuarantees/7212a7dec92fa5781081695b56d809ad/C/652d941856074f2fbcc3a31ef89f1cdf 2024-12-16T17:57:27,431 DEBUG [HFileArchiver-3 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/7212a7dec92fa5781081695b56d809ad/C/bae7a6d695514c2dbedcb0fc412faf2a to hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/archive/data/default/TestAcidGuarantees/7212a7dec92fa5781081695b56d809ad/C/bae7a6d695514c2dbedcb0fc412faf2a 2024-12-16T17:57:27,431 DEBUG [HFileArchiver-7 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/7212a7dec92fa5781081695b56d809ad/C/b3037d6f8597426fba3b5b5e771880ed to hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/archive/data/default/TestAcidGuarantees/7212a7dec92fa5781081695b56d809ad/C/b3037d6f8597426fba3b5b5e771880ed 2024-12-16T17:57:27,431 DEBUG [HFileArchiver-8 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/7212a7dec92fa5781081695b56d809ad/C/c929de87769e4ad392ffb2e31a1b6102 to hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/archive/data/default/TestAcidGuarantees/7212a7dec92fa5781081695b56d809ad/C/c929de87769e4ad392ffb2e31a1b6102 2024-12-16T17:57:27,431 DEBUG [HFileArchiver-1 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/7212a7dec92fa5781081695b56d809ad/C/ebb81563988a43939276f33d1b25559e to hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/archive/data/default/TestAcidGuarantees/7212a7dec92fa5781081695b56d809ad/C/ebb81563988a43939276f33d1b25559e 2024-12-16T17:57:27,431 DEBUG [HFileArchiver-2 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, 
hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/7212a7dec92fa5781081695b56d809ad/C/ab5f11a8d0a04df98673fed99afc69ed to hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/archive/data/default/TestAcidGuarantees/7212a7dec92fa5781081695b56d809ad/C/ab5f11a8d0a04df98673fed99afc69ed 2024-12-16T17:57:27,431 DEBUG [HFileArchiver-5 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/7212a7dec92fa5781081695b56d809ad/C/795357db82374053b61402c7bde286c2 to hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/archive/data/default/TestAcidGuarantees/7212a7dec92fa5781081695b56d809ad/C/795357db82374053b61402c7bde286c2 2024-12-16T17:57:27,446 DEBUG [RS_CLOSE_REGION-regionserver/3609ad07831c:0-0 {event_type=M_RS_CLOSE_REGION, pid=58}] wal.WALSplitUtil(409): Wrote file=hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/7212a7dec92fa5781081695b56d809ad/recovered.edits/389.seqid, newMaxSeqId=389, maxSeqId=4 2024-12-16T17:57:27,447 INFO [RS_CLOSE_REGION-regionserver/3609ad07831c:0-0 {event_type=M_RS_CLOSE_REGION, pid=58}] regionserver.HRegion(1922): Closed TestAcidGuarantees,,1734371821088.7212a7dec92fa5781081695b56d809ad. 2024-12-16T17:57:27,447 DEBUG [RS_CLOSE_REGION-regionserver/3609ad07831c:0-0 {event_type=M_RS_CLOSE_REGION, pid=58}] regionserver.HRegion(1635): Region close journal for 7212a7dec92fa5781081695b56d809ad: 2024-12-16T17:57:27,449 INFO [RS_CLOSE_REGION-regionserver/3609ad07831c:0-0 {event_type=M_RS_CLOSE_REGION, pid=58}] handler.UnassignRegionHandler(170): Closed 7212a7dec92fa5781081695b56d809ad 2024-12-16T17:57:27,449 INFO [PEWorker-2 {}] assignment.RegionStateStore(202): pid=57 updating hbase:meta row=7212a7dec92fa5781081695b56d809ad, regionState=CLOSED 2024-12-16T17:57:27,452 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=58, resume processing ppid=57 2024-12-16T17:57:27,452 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=58, ppid=57, state=SUCCESS; CloseRegionProcedure 7212a7dec92fa5781081695b56d809ad, server=3609ad07831c,39733,1734371789085 in 1.1870 sec 2024-12-16T17:57:27,453 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=57, resume processing ppid=56 2024-12-16T17:57:27,454 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=57, ppid=56, state=SUCCESS; TransitRegionStateProcedure table=TestAcidGuarantees, region=7212a7dec92fa5781081695b56d809ad, UNASSIGN in 1.1940 sec 2024-12-16T17:57:27,455 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=56, resume processing ppid=55 2024-12-16T17:57:27,455 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=56, ppid=55, state=SUCCESS; CloseTableRegionsProcedure table=TestAcidGuarantees in 1.1970 sec 2024-12-16T17:57:27,457 DEBUG [PEWorker-1 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":1,"row":"TestAcidGuarantees","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1734371847457"}]},"ts":"1734371847457"} 2024-12-16T17:57:27,458 INFO [PEWorker-1 {}] hbase.MetaTableAccessor(1655): Updated tableName=TestAcidGuarantees, state=DISABLED in hbase:meta 2024-12-16T17:57:27,506 INFO [PEWorker-1 {}] procedure.DisableTableProcedure(296): Set TestAcidGuarantees 
to state=DISABLED 2024-12-16T17:57:27,508 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=55, state=SUCCESS; DisableTableProcedure table=TestAcidGuarantees in 1.3510 sec 2024-12-16T17:57:27,534 DEBUG [FsDatasetAsyncDiskServiceFixer {}] hbase.HBaseTestingUtility$FsDatasetAsyncDiskServiceFixer(620): NoSuchFieldException: threadGroup; It might because your Hadoop version > 3.2.3 or 3.3.4, See HBASE-27595 for details. 2024-12-16T17:57:28,263 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38367 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=55 2024-12-16T17:57:28,264 INFO [Time-limited test {}] client.HBaseAdmin$TableFuture(3751): Operation: DISABLE, Table Name: default:TestAcidGuarantees, procId: 55 completed 2024-12-16T17:57:28,265 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38367 {}] master.HMaster$5(2505): Client=jenkins//172.17.0.2 delete TestAcidGuarantees 2024-12-16T17:57:28,265 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38367 {}] procedure2.ProcedureExecutor(1098): Stored pid=59, state=RUNNABLE:DELETE_TABLE_PRE_OPERATION; DeleteTableProcedure table=TestAcidGuarantees 2024-12-16T17:57:28,266 DEBUG [PEWorker-4 {}] procedure.DeleteTableProcedure(103): Waiting for RIT for pid=59, state=RUNNABLE:DELETE_TABLE_PRE_OPERATION, locked=true; DeleteTableProcedure table=TestAcidGuarantees 2024-12-16T17:57:28,267 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38367 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=59 2024-12-16T17:57:28,267 DEBUG [PEWorker-4 {}] procedure.DeleteTableProcedure(115): Deleting regions from filesystem for pid=59, state=RUNNABLE:DELETE_TABLE_CLEAR_FS_LAYOUT, locked=true; DeleteTableProcedure table=TestAcidGuarantees 2024-12-16T17:57:28,268 DEBUG [HFileArchiver-4 {}] backup.HFileArchiver(133): ARCHIVING hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/7212a7dec92fa5781081695b56d809ad 2024-12-16T17:57:28,271 DEBUG [HFileArchiver-4 {}] backup.HFileArchiver(161): Archiving [FileablePath, hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/7212a7dec92fa5781081695b56d809ad/A, FileablePath, hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/7212a7dec92fa5781081695b56d809ad/B, FileablePath, hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/7212a7dec92fa5781081695b56d809ad/C, FileablePath, hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/7212a7dec92fa5781081695b56d809ad/recovered.edits] 2024-12-16T17:57:28,275 DEBUG [HFileArchiver-6 {}] backup.HFileArchiver(620): Archived from FileablePath, hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/7212a7dec92fa5781081695b56d809ad/A/462ae9fd449047e98cb42e99f27743c5 to hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/archive/data/default/TestAcidGuarantees/7212a7dec92fa5781081695b56d809ad/A/462ae9fd449047e98cb42e99f27743c5 2024-12-16T17:57:28,275 DEBUG [HFileArchiver-3 {}] backup.HFileArchiver(620): Archived from FileablePath, 
hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/7212a7dec92fa5781081695b56d809ad/A/5dae3698eee54ee18b3bc5817237539a to hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/archive/data/default/TestAcidGuarantees/7212a7dec92fa5781081695b56d809ad/A/5dae3698eee54ee18b3bc5817237539a 2024-12-16T17:57:28,275 DEBUG [HFileArchiver-7 {}] backup.HFileArchiver(620): Archived from FileablePath, hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/7212a7dec92fa5781081695b56d809ad/A/df0fba99142b4dda9d2c1954d48466e8 to hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/archive/data/default/TestAcidGuarantees/7212a7dec92fa5781081695b56d809ad/A/df0fba99142b4dda9d2c1954d48466e8 2024-12-16T17:57:28,275 DEBUG [HFileArchiver-8 {}] backup.HFileArchiver(620): Archived from FileablePath, hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/7212a7dec92fa5781081695b56d809ad/A/f283ddca78a74eb4b9eda82d60858428 to hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/archive/data/default/TestAcidGuarantees/7212a7dec92fa5781081695b56d809ad/A/f283ddca78a74eb4b9eda82d60858428 2024-12-16T17:57:28,279 DEBUG [HFileArchiver-6 {}] backup.HFileArchiver(620): Archived from FileablePath, hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/7212a7dec92fa5781081695b56d809ad/B/f492b432674647ccbd2c48211b287158 to hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/archive/data/default/TestAcidGuarantees/7212a7dec92fa5781081695b56d809ad/B/f492b432674647ccbd2c48211b287158 2024-12-16T17:57:28,279 DEBUG [HFileArchiver-5 {}] backup.HFileArchiver(620): Archived from FileablePath, hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/7212a7dec92fa5781081695b56d809ad/B/ef1811e997a6470a98d2089a9e7924e3 to hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/archive/data/default/TestAcidGuarantees/7212a7dec92fa5781081695b56d809ad/B/ef1811e997a6470a98d2089a9e7924e3 2024-12-16T17:57:28,279 DEBUG [HFileArchiver-1 {}] backup.HFileArchiver(620): Archived from FileablePath, hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/7212a7dec92fa5781081695b56d809ad/B/70c573e0eb7c497db61e61e9cda672ae to hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/archive/data/default/TestAcidGuarantees/7212a7dec92fa5781081695b56d809ad/B/70c573e0eb7c497db61e61e9cda672ae 2024-12-16T17:57:28,279 DEBUG [HFileArchiver-2 {}] backup.HFileArchiver(620): Archived from FileablePath, hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/7212a7dec92fa5781081695b56d809ad/B/870111d09468422db0cab8f2e56ada62 to hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/archive/data/default/TestAcidGuarantees/7212a7dec92fa5781081695b56d809ad/B/870111d09468422db0cab8f2e56ada62 2024-12-16T17:57:28,284 DEBUG [HFileArchiver-6 {}] backup.HFileArchiver(620): Archived from FileablePath, 
hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/7212a7dec92fa5781081695b56d809ad/C/bf04503173dd4f14bffedbfaddc1f3c2 to hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/archive/data/default/TestAcidGuarantees/7212a7dec92fa5781081695b56d809ad/C/bf04503173dd4f14bffedbfaddc1f3c2 2024-12-16T17:57:28,285 DEBUG [HFileArchiver-3 {}] backup.HFileArchiver(620): Archived from FileablePath, hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/7212a7dec92fa5781081695b56d809ad/C/3c3555f4098341e5b2da32351e53fc1c to hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/archive/data/default/TestAcidGuarantees/7212a7dec92fa5781081695b56d809ad/C/3c3555f4098341e5b2da32351e53fc1c 2024-12-16T17:57:28,285 DEBUG [HFileArchiver-8 {}] backup.HFileArchiver(620): Archived from FileablePath, hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/7212a7dec92fa5781081695b56d809ad/C/83de57082e67492688a44efacb4b1aea to hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/archive/data/default/TestAcidGuarantees/7212a7dec92fa5781081695b56d809ad/C/83de57082e67492688a44efacb4b1aea 2024-12-16T17:57:28,285 DEBUG [HFileArchiver-7 {}] backup.HFileArchiver(620): Archived from FileablePath, hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/7212a7dec92fa5781081695b56d809ad/C/6608f4f6f9444a8b813ae5930674f24a to hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/archive/data/default/TestAcidGuarantees/7212a7dec92fa5781081695b56d809ad/C/6608f4f6f9444a8b813ae5930674f24a 2024-12-16T17:57:28,288 DEBUG [HFileArchiver-5 {}] backup.HFileArchiver(620): Archived from FileablePath, hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/7212a7dec92fa5781081695b56d809ad/recovered.edits/389.seqid to hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/archive/data/default/TestAcidGuarantees/7212a7dec92fa5781081695b56d809ad/recovered.edits/389.seqid 2024-12-16T17:57:28,289 DEBUG [HFileArchiver-4 {}] backup.HFileArchiver(634): Deleted hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/7212a7dec92fa5781081695b56d809ad 2024-12-16T17:57:28,289 DEBUG [PEWorker-4 {}] procedure.DeleteTableProcedure(313): Archived TestAcidGuarantees regions 2024-12-16T17:57:28,289 DEBUG [PEWorker-4 {}] backup.HFileArchiver(133): ARCHIVING hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3 2024-12-16T17:57:28,290 DEBUG [PEWorker-4 {}] backup.HFileArchiver(161): Archiving [FileablePath, hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A] 2024-12-16T17:57:28,315 DEBUG [HFileArchiver-1 {}] backup.HFileArchiver(620): Archived from FileablePath, hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202412162156aa640f214e0ea36a77a12b581c3a_7212a7dec92fa5781081695b56d809ad to 
hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202412162156aa640f214e0ea36a77a12b581c3a_7212a7dec92fa5781081695b56d809ad 2024-12-16T17:57:28,315 DEBUG [HFileArchiver-6 {}] backup.HFileArchiver(620): Archived from FileablePath, hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202412163f7e259435934a7ebc5dd74ce6c8fbad_7212a7dec92fa5781081695b56d809ad to hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202412163f7e259435934a7ebc5dd74ce6c8fbad_7212a7dec92fa5781081695b56d809ad 2024-12-16T17:57:28,315 DEBUG [HFileArchiver-2 {}] backup.HFileArchiver(620): Archived from FileablePath, hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e2024121621c3f57590b841ba8bc4f98394ba43b6_7212a7dec92fa5781081695b56d809ad to hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e2024121621c3f57590b841ba8bc4f98394ba43b6_7212a7dec92fa5781081695b56d809ad 2024-12-16T17:57:28,315 DEBUG [HFileArchiver-3 {}] backup.HFileArchiver(620): Archived from FileablePath, hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202412165c6b21ec322e40d39f6a6bf9769edc7e_7212a7dec92fa5781081695b56d809ad to hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202412165c6b21ec322e40d39f6a6bf9769edc7e_7212a7dec92fa5781081695b56d809ad 2024-12-16T17:57:28,316 DEBUG [HFileArchiver-8 {}] backup.HFileArchiver(620): Archived from FileablePath, hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e2024121656d4ff57e0664d229452714000dabfcc_7212a7dec92fa5781081695b56d809ad to hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e2024121656d4ff57e0664d229452714000dabfcc_7212a7dec92fa5781081695b56d809ad 2024-12-16T17:57:28,316 DEBUG [HFileArchiver-5 {}] backup.HFileArchiver(620): Archived from FileablePath, hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202412168797999db3a94280b90b1ad29b787fd0_7212a7dec92fa5781081695b56d809ad to hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202412168797999db3a94280b90b1ad29b787fd0_7212a7dec92fa5781081695b56d809ad 2024-12-16T17:57:28,316 DEBUG [HFileArchiver-7 {}] backup.HFileArchiver(620): Archived from FileablePath, 
hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e2024121680072e0a39584fcd9e5264f541d0ce40_7212a7dec92fa5781081695b56d809ad to hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e2024121680072e0a39584fcd9e5264f541d0ce40_7212a7dec92fa5781081695b56d809ad 2024-12-16T17:57:28,316 DEBUG [HFileArchiver-4 {}] backup.HFileArchiver(620): Archived from FileablePath, hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241216895dff39e0fe49358df80ed03e7fe90a_7212a7dec92fa5781081695b56d809ad to hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241216895dff39e0fe49358df80ed03e7fe90a_7212a7dec92fa5781081695b56d809ad 2024-12-16T17:57:28,318 DEBUG [HFileArchiver-1 {}] backup.HFileArchiver(620): Archived from FileablePath, hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e2024121690afff550bc5458d9f70c8e32d0c0e39_7212a7dec92fa5781081695b56d809ad to hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e2024121690afff550bc5458d9f70c8e32d0c0e39_7212a7dec92fa5781081695b56d809ad 2024-12-16T17:57:28,318 DEBUG [HFileArchiver-6 {}] backup.HFileArchiver(620): Archived from FileablePath, hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e2024121694c907d4fba24bd6bd6a5a1931737de0_7212a7dec92fa5781081695b56d809ad to hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e2024121694c907d4fba24bd6bd6a5a1931737de0_7212a7dec92fa5781081695b56d809ad 2024-12-16T17:57:28,318 DEBUG [HFileArchiver-2 {}] backup.HFileArchiver(620): Archived from FileablePath, hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241216955de616b26e4e3bb643e7f6cb6f243c_7212a7dec92fa5781081695b56d809ad to hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241216955de616b26e4e3bb643e7f6cb6f243c_7212a7dec92fa5781081695b56d809ad 2024-12-16T17:57:28,319 DEBUG [HFileArchiver-8 {}] backup.HFileArchiver(620): Archived from FileablePath, hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241216aa6bd760860f47fa9ad2324543fe39e3_7212a7dec92fa5781081695b56d809ad to 
hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241216aa6bd760860f47fa9ad2324543fe39e3_7212a7dec92fa5781081695b56d809ad 2024-12-16T17:57:28,319 DEBUG [HFileArchiver-7 {}] backup.HFileArchiver(620): Archived from FileablePath, hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241216c6fa9c2d3c4142729849f3ba72a420d6_7212a7dec92fa5781081695b56d809ad to hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241216c6fa9c2d3c4142729849f3ba72a420d6_7212a7dec92fa5781081695b56d809ad 2024-12-16T17:57:28,319 DEBUG [HFileArchiver-3 {}] backup.HFileArchiver(620): Archived from FileablePath, hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241216a8605fcefbfc4f49ae798ddc228082a2_7212a7dec92fa5781081695b56d809ad to hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241216a8605fcefbfc4f49ae798ddc228082a2_7212a7dec92fa5781081695b56d809ad 2024-12-16T17:57:28,319 DEBUG [HFileArchiver-4 {}] backup.HFileArchiver(620): Archived from FileablePath, hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241216cabb03eacfe94d4e9f2b12485c728d17_7212a7dec92fa5781081695b56d809ad to hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241216cabb03eacfe94d4e9f2b12485c728d17_7212a7dec92fa5781081695b56d809ad 2024-12-16T17:57:28,320 DEBUG [HFileArchiver-1 {}] backup.HFileArchiver(620): Archived from FileablePath, hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241216d76a8582e0d9454aa2e4a596995dff40_7212a7dec92fa5781081695b56d809ad to hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241216d76a8582e0d9454aa2e4a596995dff40_7212a7dec92fa5781081695b56d809ad 2024-12-16T17:57:28,321 DEBUG [HFileArchiver-6 {}] backup.HFileArchiver(620): Archived from FileablePath, hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241216dd85b38478ee42578a05e4b16ef19e71_7212a7dec92fa5781081695b56d809ad to hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241216dd85b38478ee42578a05e4b16ef19e71_7212a7dec92fa5781081695b56d809ad 2024-12-16T17:57:28,321 DEBUG [HFileArchiver-2 {}] backup.HFileArchiver(620): Archived from FileablePath, 
hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241216f12d793ef0d742299bb0ff2cd81c3045_7212a7dec92fa5781081695b56d809ad to hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241216f12d793ef0d742299bb0ff2cd81c3045_7212a7dec92fa5781081695b56d809ad 2024-12-16T17:57:28,321 DEBUG [HFileArchiver-8 {}] backup.HFileArchiver(620): Archived from FileablePath, hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241216f2c8a5725e9f4213b80c889c8cd28d9f_7212a7dec92fa5781081695b56d809ad to hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241216f2c8a5725e9f4213b80c889c8cd28d9f_7212a7dec92fa5781081695b56d809ad 2024-12-16T17:57:28,321 DEBUG [HFileArchiver-5 {}] backup.HFileArchiver(620): Archived from FileablePath, hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241216bbd5fc7fead7434b9a8aab8173c80215_7212a7dec92fa5781081695b56d809ad to hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241216bbd5fc7fead7434b9a8aab8173c80215_7212a7dec92fa5781081695b56d809ad 2024-12-16T17:57:28,323 DEBUG [PEWorker-4 {}] backup.HFileArchiver(634): Deleted hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3 2024-12-16T17:57:28,325 DEBUG [PEWorker-4 {}] procedure.DeleteTableProcedure(120): Deleting regions from META for pid=59, state=RUNNABLE:DELETE_TABLE_REMOVE_FROM_META, locked=true; DeleteTableProcedure table=TestAcidGuarantees 2024-12-16T17:57:28,328 WARN [PEWorker-4 {}] procedure.DeleteTableProcedure(371): Deleting some vestigial 1 rows of TestAcidGuarantees from hbase:meta 2024-12-16T17:57:28,334 DEBUG [PEWorker-4 {}] procedure.DeleteTableProcedure(408): Removing 'TestAcidGuarantees' descriptor. 2024-12-16T17:57:28,335 DEBUG [PEWorker-4 {}] procedure.DeleteTableProcedure(126): Deleting assignment state for pid=59, state=RUNNABLE:DELETE_TABLE_UNASSIGN_REGIONS, locked=true; DeleteTableProcedure table=TestAcidGuarantees 2024-12-16T17:57:28,336 DEBUG [PEWorker-4 {}] procedure.DeleteTableProcedure(398): Removing 'TestAcidGuarantees' from region states. 
2024-12-16T17:57:28,336 DEBUG [PEWorker-4 {}] hbase.MetaTableAccessor(2113): Delete {"totalColumns":1,"row":"TestAcidGuarantees,,1734371821088.7212a7dec92fa5781081695b56d809ad.","families":{"info":[{"qualifier":"","vlen":0,"tag":[],"timestamp":"1734371848336"}]},"ts":"9223372036854775807"} 2024-12-16T17:57:28,338 INFO [PEWorker-4 {}] hbase.MetaTableAccessor(1808): Deleted 1 regions from META 2024-12-16T17:57:28,338 DEBUG [PEWorker-4 {}] hbase.MetaTableAccessor(1809): Deleted regions: [{ENCODED => 7212a7dec92fa5781081695b56d809ad, NAME => 'TestAcidGuarantees,,1734371821088.7212a7dec92fa5781081695b56d809ad.', STARTKEY => '', ENDKEY => ''}] 2024-12-16T17:57:28,338 DEBUG [PEWorker-4 {}] procedure.DeleteTableProcedure(402): Marking 'TestAcidGuarantees' as deleted. 2024-12-16T17:57:28,340 DEBUG [PEWorker-4 {}] hbase.MetaTableAccessor(2113): Delete {"totalColumns":1,"row":"TestAcidGuarantees","families":{"table":[{"qualifier":"state","vlen":0,"tag":[],"timestamp":"1734371848338"}]},"ts":"9223372036854775807"} 2024-12-16T17:57:28,342 INFO [PEWorker-4 {}] hbase.MetaTableAccessor(1678): Deleted table TestAcidGuarantees state from META 2024-12-16T17:57:28,367 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38367 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=59 2024-12-16T17:57:28,374 DEBUG [PEWorker-4 {}] procedure.DeleteTableProcedure(133): Finished pid=59, state=RUNNABLE:DELETE_TABLE_POST_OPERATION, locked=true; DeleteTableProcedure table=TestAcidGuarantees 2024-12-16T17:57:28,375 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=59, state=SUCCESS; DeleteTableProcedure table=TestAcidGuarantees in 109 msec 2024-12-16T17:57:28,569 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38367 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=59 2024-12-16T17:57:28,570 INFO [Time-limited test {}] client.HBaseAdmin$TableFuture(3751): Operation: DELETE, Table Name: default:TestAcidGuarantees, procId: 59 completed 2024-12-16T17:57:28,587 INFO [Time-limited test {}] hbase.ResourceChecker(175): after: TestAcidGuaranteesWithAdaptivePolicy#testMobMixedAtomicity Thread=248 (was 247) Potentially hanging thread: RPCClient-NioEventLoopGroup-4-15 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RPCClient-NioEventLoopGroup-4-16 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) 
java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: DataXceiver for client DFSClient_NONMAPREDUCE_-145401003_22 at /127.0.0.1:50562 [Waiting for operation #113] java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:335) app//org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) java.base@17.0.11/java.io.BufferedInputStream.fill(BufferedInputStream.java:244) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:263) java.base@17.0.11/java.io.DataInputStream.readUnsignedShort(DataInputStream.java:334) java.base@17.0.11/java.io.DataInputStream.readShort(DataInputStream.java:312) app//org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.readOp(Receiver.java:72) app//org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:273) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: hconnection-0xf95a041-shared-pool-11 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: hconnection-0xf95a041-shared-pool-10 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) 
java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: DataXceiver for client DFSClient_NONMAPREDUCE_-389028407_22 at /127.0.0.1:53006 [Waiting for operation #640] java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:335) app//org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) java.base@17.0.11/java.io.BufferedInputStream.fill(BufferedInputStream.java:244) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:263) java.base@17.0.11/java.io.DataInputStream.readUnsignedShort(DataInputStream.java:334) java.base@17.0.11/java.io.DataInputStream.readShort(DataInputStream.java:312) app//org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.readOp(Receiver.java:72) app//org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:273) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: DataXceiver for client DFSClient_NONMAPREDUCE_-389028407_22 at /127.0.0.1:38494 [Waiting for operation #584] java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:335) app//org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) java.base@17.0.11/java.io.BufferedInputStream.fill(BufferedInputStream.java:244) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:263) java.base@17.0.11/java.io.DataInputStream.readUnsignedShort(DataInputStream.java:334) java.base@17.0.11/java.io.DataInputStream.readShort(DataInputStream.java:312) app//org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.readOp(Receiver.java:72) app//org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:273) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: hconnection-0xf95a041-shared-pool-8 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) 
java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: Async disk worker #0 for volume /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/adfbedd3-683f-aafc-4f85-97b20bf2b38d/cluster_489038c2-cc48-2e45-cfe4-52e70c8038f4/dfs/data/data1 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: hconnection-0xf95a041-shared-pool-9 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RPCClient-NioEventLoopGroup-4-14 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: Async disk worker #0 for volume /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/adfbedd3-683f-aafc-4f85-97b20bf2b38d/cluster_489038c2-cc48-2e45-cfe4-52e70c8038f4/dfs/data/data2 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) 
java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: DataXceiver for client DFSClient_NONMAPREDUCE_-145401003_22 at /127.0.0.1:50550 [Waiting for operation #115] java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:335) app//org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) java.base@17.0.11/java.io.BufferedInputStream.fill(BufferedInputStream.java:244) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:263) java.base@17.0.11/java.io.DataInputStream.readUnsignedShort(DataInputStream.java:334) java.base@17.0.11/java.io.DataInputStream.readShort(DataInputStream.java:312) app//org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.readOp(Receiver.java:72) app//org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:273) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) - Thread LEAK? -, OpenFileDescriptor=464 (was 458) - OpenFileDescriptor LEAK? -, MaxFileDescriptor=1048576 (was 1048576), SystemLoadAverage=451 (was 221) - SystemLoadAverage LEAK? -, ProcessCount=11 (was 11), AvailableMemoryMB=3329 (was 3690) 2024-12-16T17:57:28,595 INFO [Time-limited test {}] hbase.ResourceChecker(147): before: TestAcidGuaranteesWithAdaptivePolicy#testGetAtomicity Thread=248, OpenFileDescriptor=464, MaxFileDescriptor=1048576, SystemLoadAverage=451, ProcessCount=11, AvailableMemoryMB=3329 2024-12-16T17:57:28,597 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38367 {}] util.TableDescriptorChecker(321): MEMSTORE_FLUSHSIZE for table descriptor or "hbase.hregion.memstore.flush.size" (131072) is too small, which might cause very frequent flushing. 
2024-12-16T17:57:28,597 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38367 {}] master.HMaster$4(2389): Client=jenkins//172.17.0.2 create 'TestAcidGuarantees', {TABLE_ATTRIBUTES => {METADATA => {'hbase.hregion.compacting.memstore.type' => 'ADAPTIVE'}}}, {NAME => 'A', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'B', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'C', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-12-16T17:57:28,598 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38367 {}] procedure2.ProcedureExecutor(1098): Stored pid=60, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION; CreateTableProcedure table=TestAcidGuarantees 2024-12-16T17:57:28,598 INFO [PEWorker-2 {}] procedure.CreateTableProcedure(89): pid=60, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, locked=true; CreateTableProcedure table=TestAcidGuarantees execute state=CREATE_TABLE_PRE_OPERATION 2024-12-16T17:57:28,599 DEBUG [PEWorker-2 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-16T17:57:28,599 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38367 {}] master.MasterRpcServices(713): Client=jenkins//172.17.0.2 procedure request for creating table: namespace: "default" qualifier: "TestAcidGuarantees" procId is: 60 2024-12-16T17:57:28,599 INFO [PEWorker-2 {}] procedure.CreateTableProcedure(89): pid=60, state=RUNNABLE:CREATE_TABLE_WRITE_FS_LAYOUT, locked=true; CreateTableProcedure table=TestAcidGuarantees execute state=CREATE_TABLE_WRITE_FS_LAYOUT 2024-12-16T17:57:28,599 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38367 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=60 2024-12-16T17:57:28,607 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41817 is added to blk_1073742076_1252 (size=963) 2024-12-16T17:57:28,700 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38367 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=60 2024-12-16T17:57:28,762 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(153): Removing adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_default_table_TestAcidGuarantees 2024-12-16T17:57:28,901 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38367 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=60 2024-12-16T17:57:29,009 INFO [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(7106): creating {ENCODED => 9edd584a765d2a226d81ae3095fa4916, NAME => 'TestAcidGuarantees,,1734371848597.9edd584a765d2a226d81ae3095fa4916.', STARTKEY => '', ENDKEY => ''}, tableDescriptor='TestAcidGuarantees', 
{TABLE_ATTRIBUTES => {METADATA => {'hbase.hregion.compacting.memstore.type' => 'ADAPTIVE', 'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'A', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'B', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'C', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, regionDir=hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4 2024-12-16T17:57:29,015 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41817 is added to blk_1073742077_1253 (size=53) 2024-12-16T17:57:29,203 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38367 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=60 2024-12-16T17:57:29,416 DEBUG [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(894): Instantiated TestAcidGuarantees,,1734371848597.9edd584a765d2a226d81ae3095fa4916.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-16T17:57:29,416 DEBUG [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(1681): Closing 9edd584a765d2a226d81ae3095fa4916, disabling compactions & flushes 2024-12-16T17:57:29,416 INFO [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(1703): Closing region TestAcidGuarantees,,1734371848597.9edd584a765d2a226d81ae3095fa4916. 2024-12-16T17:57:29,416 DEBUG [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(1724): Waiting without time limit for close lock on TestAcidGuarantees,,1734371848597.9edd584a765d2a226d81ae3095fa4916. 2024-12-16T17:57:29,416 DEBUG [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(1791): Acquired close lock on TestAcidGuarantees,,1734371848597.9edd584a765d2a226d81ae3095fa4916. after waiting 0 ms 2024-12-16T17:57:29,416 DEBUG [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(1801): Updates disabled for region TestAcidGuarantees,,1734371848597.9edd584a765d2a226d81ae3095fa4916. 2024-12-16T17:57:29,416 INFO [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(1922): Closed TestAcidGuarantees,,1734371848597.9edd584a765d2a226d81ae3095fa4916. 
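The create request logged above spells out the full descriptor for 'TestAcidGuarantees': three column families A, B and C, a single version each, and ADAPTIVE in-memory compaction set through the table metadata. For reference, a minimal sketch of how a comparable descriptor could be built with the HBase 2.x client API follows; the table name, family names and attribute values are taken from the log, while the connection setup and the exact builder calls are assumptions about how one would reproduce the schema outside the test harness.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.MemoryCompactionPolicy;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptor;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
import org.apache.hadoop.hbase.util.Bytes;

public class CreateTestAcidGuaranteesTable {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    try (Connection conn = ConnectionFactory.createConnection(conf);
         Admin admin = conn.getAdmin()) {
      TableDescriptorBuilder table =
          TableDescriptorBuilder.newBuilder(TableName.valueOf("TestAcidGuarantees"))
              // Mirrors TABLE_ATTRIBUTES => METADATA in the logged descriptor.
              .setValue("hbase.hregion.compacting.memstore.type", "ADAPTIVE");
      for (String family : new String[] {"A", "B", "C"}) {
        ColumnFamilyDescriptor cf =
            ColumnFamilyDescriptorBuilder.newBuilder(Bytes.toBytes(family))
                .setMaxVersions(1)                                   // VERSIONS => '1'
                .setInMemoryCompaction(MemoryCompactionPolicy.ADAPTIVE)
                .build();
        table.setColumnFamily(cf);
      }
      admin.createTable(table.build());
    }
  }
}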
2024-12-16T17:57:29,416 DEBUG [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(1635): Region close journal for 9edd584a765d2a226d81ae3095fa4916: 2024-12-16T17:57:29,417 INFO [PEWorker-2 {}] procedure.CreateTableProcedure(89): pid=60, state=RUNNABLE:CREATE_TABLE_ADD_TO_META, locked=true; CreateTableProcedure table=TestAcidGuarantees execute state=CREATE_TABLE_ADD_TO_META 2024-12-16T17:57:29,417 DEBUG [PEWorker-2 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":2,"row":"TestAcidGuarantees,,1734371848597.9edd584a765d2a226d81ae3095fa4916.","families":{"info":[{"qualifier":"regioninfo","vlen":52,"tag":[],"timestamp":"1734371849417"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1734371849417"}]},"ts":"1734371849417"} 2024-12-16T17:57:29,418 INFO [PEWorker-2 {}] hbase.MetaTableAccessor(1516): Added 1 regions to meta. 2024-12-16T17:57:29,419 INFO [PEWorker-2 {}] procedure.CreateTableProcedure(89): pid=60, state=RUNNABLE:CREATE_TABLE_ASSIGN_REGIONS, locked=true; CreateTableProcedure table=TestAcidGuarantees execute state=CREATE_TABLE_ASSIGN_REGIONS 2024-12-16T17:57:29,419 DEBUG [PEWorker-2 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":1,"row":"TestAcidGuarantees","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1734371849419"}]},"ts":"1734371849419"} 2024-12-16T17:57:29,420 INFO [PEWorker-2 {}] hbase.MetaTableAccessor(1655): Updated tableName=TestAcidGuarantees, state=ENABLING in hbase:meta 2024-12-16T17:57:29,440 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=61, ppid=60, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE; TransitRegionStateProcedure table=TestAcidGuarantees, region=9edd584a765d2a226d81ae3095fa4916, ASSIGN}] 2024-12-16T17:57:29,441 INFO [PEWorker-3 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=61, ppid=60, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE; TransitRegionStateProcedure table=TestAcidGuarantees, region=9edd584a765d2a226d81ae3095fa4916, ASSIGN 2024-12-16T17:57:29,441 INFO [PEWorker-3 {}] assignment.TransitRegionStateProcedure(264): Starting pid=61, ppid=60, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, locked=true; TransitRegionStateProcedure table=TestAcidGuarantees, region=9edd584a765d2a226d81ae3095fa4916, ASSIGN; state=OFFLINE, location=3609ad07831c,39733,1734371789085; forceNewPlan=false, retain=false 2024-12-16T17:57:29,592 INFO [PEWorker-5 {}] assignment.RegionStateStore(202): pid=61 updating hbase:meta row=9edd584a765d2a226d81ae3095fa4916, regionState=OPENING, regionLocation=3609ad07831c,39733,1734371789085 2024-12-16T17:57:29,593 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=62, ppid=61, state=RUNNABLE; OpenRegionProcedure 9edd584a765d2a226d81ae3095fa4916, server=3609ad07831c,39733,1734371789085}] 2024-12-16T17:57:29,704 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38367 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=60 2024-12-16T17:57:29,745 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 3609ad07831c,39733,1734371789085 2024-12-16T17:57:29,749 INFO [RS_OPEN_REGION-regionserver/3609ad07831c:0-0 {event_type=M_RS_OPEN_REGION, pid=62}] handler.AssignRegionHandler(135): Open TestAcidGuarantees,,1734371848597.9edd584a765d2a226d81ae3095fa4916. 
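The master drives the creation through CreateTableProcedure (pid=60) while the client keeps asking "Checking to see if procedure is done". A caller that wants to wait for the same condition explicitly can poll Admin.isTableAvailable, as in the sketch below; the timeout handling and the 100 ms polling interval are illustrative choices, not values taken from the test.

import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;

public class WaitForTable {
  // Polls the master until the table's regions are assigned, mirroring the
  // repeated "Checking to see if procedure is done" calls in the log above.
  static void waitUntilAvailable(Admin admin, TableName table, long timeoutMs)
      throws Exception {
    long deadline = System.currentTimeMillis() + timeoutMs;
    while (!admin.isTableAvailable(table)) {
      if (System.currentTimeMillis() > deadline) {
        throw new java.util.concurrent.TimeoutException("Table not available: " + table);
      }
      Thread.sleep(100); // polling interval is an arbitrary choice for this sketch
    }
  }
}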
2024-12-16T17:57:29,749 DEBUG [RS_OPEN_REGION-regionserver/3609ad07831c:0-0 {event_type=M_RS_OPEN_REGION, pid=62}] regionserver.HRegion(7285): Opening region: {ENCODED => 9edd584a765d2a226d81ae3095fa4916, NAME => 'TestAcidGuarantees,,1734371848597.9edd584a765d2a226d81ae3095fa4916.', STARTKEY => '', ENDKEY => ''} 2024-12-16T17:57:29,749 DEBUG [RS_OPEN_REGION-regionserver/3609ad07831c:0-0 {event_type=M_RS_OPEN_REGION, pid=62}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table TestAcidGuarantees 9edd584a765d2a226d81ae3095fa4916 2024-12-16T17:57:29,749 DEBUG [RS_OPEN_REGION-regionserver/3609ad07831c:0-0 {event_type=M_RS_OPEN_REGION, pid=62}] regionserver.HRegion(894): Instantiated TestAcidGuarantees,,1734371848597.9edd584a765d2a226d81ae3095fa4916.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-16T17:57:29,750 DEBUG [RS_OPEN_REGION-regionserver/3609ad07831c:0-0 {event_type=M_RS_OPEN_REGION, pid=62}] regionserver.HRegion(7327): checking encryption for 9edd584a765d2a226d81ae3095fa4916 2024-12-16T17:57:29,750 DEBUG [RS_OPEN_REGION-regionserver/3609ad07831c:0-0 {event_type=M_RS_OPEN_REGION, pid=62}] regionserver.HRegion(7330): checking classloading for 9edd584a765d2a226d81ae3095fa4916 2024-12-16T17:57:29,751 INFO [StoreOpener-9edd584a765d2a226d81ae3095fa4916-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family A of region 9edd584a765d2a226d81ae3095fa4916 2024-12-16T17:57:29,754 INFO [StoreOpener-9edd584a765d2a226d81ae3095fa4916-1 {}] regionserver.CompactingMemStore(122): Store=A, in-memory flush size threshold=2.00 MB, immutable segments index type=CHUNK_MAP, compactor=ADAPTIVE, pipelineThreshold=2, compactionCellMax=10 2024-12-16T17:57:29,754 INFO [StoreOpener-9edd584a765d2a226d81ae3095fa4916-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 9edd584a765d2a226d81ae3095fa4916 columnFamilyName A 2024-12-16T17:57:29,754 DEBUG [StoreOpener-9edd584a765d2a226d81ae3095fa4916-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-16T17:57:29,755 INFO [StoreOpener-9edd584a765d2a226d81ae3095fa4916-1 {}] regionserver.HStore(327): Store=9edd584a765d2a226d81ae3095fa4916/A, memstore type=CompactingMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-16T17:57:29,755 INFO [StoreOpener-9edd584a765d2a226d81ae3095fa4916-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, 
cacheDataCompressed=false, prefetchOnOpen=false, for column family B of region 9edd584a765d2a226d81ae3095fa4916 2024-12-16T17:57:29,756 INFO [StoreOpener-9edd584a765d2a226d81ae3095fa4916-1 {}] regionserver.CompactingMemStore(122): Store=B, in-memory flush size threshold=2.00 MB, immutable segments index type=CHUNK_MAP, compactor=ADAPTIVE, pipelineThreshold=2, compactionCellMax=10 2024-12-16T17:57:29,756 INFO [StoreOpener-9edd584a765d2a226d81ae3095fa4916-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 9edd584a765d2a226d81ae3095fa4916 columnFamilyName B 2024-12-16T17:57:29,756 DEBUG [StoreOpener-9edd584a765d2a226d81ae3095fa4916-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-16T17:57:29,757 INFO [StoreOpener-9edd584a765d2a226d81ae3095fa4916-1 {}] regionserver.HStore(327): Store=9edd584a765d2a226d81ae3095fa4916/B, memstore type=CompactingMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-16T17:57:29,757 INFO [StoreOpener-9edd584a765d2a226d81ae3095fa4916-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family C of region 9edd584a765d2a226d81ae3095fa4916 2024-12-16T17:57:29,758 INFO [StoreOpener-9edd584a765d2a226d81ae3095fa4916-1 {}] regionserver.CompactingMemStore(122): Store=C, in-memory flush size threshold=2.00 MB, immutable segments index type=CHUNK_MAP, compactor=ADAPTIVE, pipelineThreshold=2, compactionCellMax=10 2024-12-16T17:57:29,759 INFO [StoreOpener-9edd584a765d2a226d81ae3095fa4916-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 9edd584a765d2a226d81ae3095fa4916 columnFamilyName C 2024-12-16T17:57:29,759 DEBUG [StoreOpener-9edd584a765d2a226d81ae3095fa4916-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-16T17:57:29,759 INFO [StoreOpener-9edd584a765d2a226d81ae3095fa4916-1 {}] regionserver.HStore(327): Store=9edd584a765d2a226d81ae3095fa4916/C, memstore 
type=CompactingMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-16T17:57:29,760 INFO [RS_OPEN_REGION-regionserver/3609ad07831c:0-0 {event_type=M_RS_OPEN_REGION, pid=62}] regionserver.HRegion(1178): Setting FlushNonSloppyStoresFirstPolicy for the region=TestAcidGuarantees,,1734371848597.9edd584a765d2a226d81ae3095fa4916. 2024-12-16T17:57:29,761 DEBUG [RS_OPEN_REGION-regionserver/3609ad07831c:0-0 {event_type=M_RS_OPEN_REGION, pid=62}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/9edd584a765d2a226d81ae3095fa4916 2024-12-16T17:57:29,761 DEBUG [RS_OPEN_REGION-regionserver/3609ad07831c:0-0 {event_type=M_RS_OPEN_REGION, pid=62}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/9edd584a765d2a226d81ae3095fa4916 2024-12-16T17:57:29,763 DEBUG [RS_OPEN_REGION-regionserver/3609ad07831c:0-0 {event_type=M_RS_OPEN_REGION, pid=62}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table TestAcidGuarantees descriptor;using region.getMemStoreFlushHeapSize/# of families (16.0 M)) instead. 2024-12-16T17:57:29,765 DEBUG [RS_OPEN_REGION-regionserver/3609ad07831c:0-0 {event_type=M_RS_OPEN_REGION, pid=62}] regionserver.HRegion(1085): writing seq id for 9edd584a765d2a226d81ae3095fa4916 2024-12-16T17:57:29,767 DEBUG [RS_OPEN_REGION-regionserver/3609ad07831c:0-0 {event_type=M_RS_OPEN_REGION, pid=62}] wal.WALSplitUtil(409): Wrote file=hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/9edd584a765d2a226d81ae3095fa4916/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-12-16T17:57:29,768 INFO [RS_OPEN_REGION-regionserver/3609ad07831c:0-0 {event_type=M_RS_OPEN_REGION, pid=62}] regionserver.HRegion(1102): Opened 9edd584a765d2a226d81ae3095fa4916; next sequenceid=2; ConstantSizeRegionSplitPolicy{desiredMaxFileSize=62783732, jitterRate=-0.0644494891166687}, FlushLargeStoresPolicy{flushSizeLowerBound=16777216} 2024-12-16T17:57:29,768 DEBUG [RS_OPEN_REGION-regionserver/3609ad07831c:0-0 {event_type=M_RS_OPEN_REGION, pid=62}] regionserver.HRegion(1001): Region open journal for 9edd584a765d2a226d81ae3095fa4916: 2024-12-16T17:57:29,769 INFO [RS_OPEN_REGION-regionserver/3609ad07831c:0-0 {event_type=M_RS_OPEN_REGION, pid=62}] regionserver.HRegionServer(2601): Post open deploy tasks for TestAcidGuarantees,,1734371848597.9edd584a765d2a226d81ae3095fa4916., pid=62, masterSystemTime=1734371849745 2024-12-16T17:57:29,771 DEBUG [RS_OPEN_REGION-regionserver/3609ad07831c:0-0 {event_type=M_RS_OPEN_REGION, pid=62}] regionserver.HRegionServer(2628): Finished post open deploy task for TestAcidGuarantees,,1734371848597.9edd584a765d2a226d81ae3095fa4916. 2024-12-16T17:57:29,771 INFO [RS_OPEN_REGION-regionserver/3609ad07831c:0-0 {event_type=M_RS_OPEN_REGION, pid=62}] handler.AssignRegionHandler(164): Opened TestAcidGuarantees,,1734371848597.9edd584a765d2a226d81ae3095fa4916. 
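The FlushLargeStoresPolicy entry above notes that hbase.hregion.percolumnfamilyflush.size.lower.bound is not set in the table descriptor, so the region falls back to memstore flush size divided by the number of families (16.0 M). If that bound were to be set explicitly, one way is through a table-level property as sketched below; the property name comes from the log line itself, while the helper, the copy-builder call and the 8 MB value are illustrative assumptions rather than anything the test does.

import org.apache.hadoop.hbase.client.TableDescriptor;
import org.apache.hadoop.hbase.client.TableDescriptorBuilder;

public class PerFamilyFlushBound {
  // Returns a copy of the given descriptor with an explicit per-family flush
  // lower bound, so FlushLargeStoresPolicy would not fall back to
  // memstore-flush-size / number-of-families as in the log entry above.
  static TableDescriptor withFlushLowerBound(TableDescriptor base, long bytes) {
    return TableDescriptorBuilder.newBuilder(base)
        .setValue("hbase.hregion.percolumnfamilyflush.size.lower.bound",
                  Long.toString(bytes)) // e.g. 8L * 1024 * 1024; value is illustrative
        .build();
  }
}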
2024-12-16T17:57:29,771 INFO [PEWorker-4 {}] assignment.RegionStateStore(202): pid=61 updating hbase:meta row=9edd584a765d2a226d81ae3095fa4916, regionState=OPEN, openSeqNum=2, regionLocation=3609ad07831c,39733,1734371789085 2024-12-16T17:57:29,775 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=62, resume processing ppid=61 2024-12-16T17:57:29,775 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=62, ppid=61, state=SUCCESS; OpenRegionProcedure 9edd584a765d2a226d81ae3095fa4916, server=3609ad07831c,39733,1734371789085 in 179 msec 2024-12-16T17:57:29,777 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=61, resume processing ppid=60 2024-12-16T17:57:29,778 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=61, ppid=60, state=SUCCESS; TransitRegionStateProcedure table=TestAcidGuarantees, region=9edd584a765d2a226d81ae3095fa4916, ASSIGN in 335 msec 2024-12-16T17:57:29,779 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=60, state=RUNNABLE:CREATE_TABLE_UPDATE_DESC_CACHE, locked=true; CreateTableProcedure table=TestAcidGuarantees execute state=CREATE_TABLE_UPDATE_DESC_CACHE 2024-12-16T17:57:29,780 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":1,"row":"TestAcidGuarantees","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1734371849779"}]},"ts":"1734371849779"} 2024-12-16T17:57:29,781 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(1655): Updated tableName=TestAcidGuarantees, state=ENABLED in hbase:meta 2024-12-16T17:57:29,791 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=60, state=RUNNABLE:CREATE_TABLE_POST_OPERATION, locked=true; CreateTableProcedure table=TestAcidGuarantees execute state=CREATE_TABLE_POST_OPERATION 2024-12-16T17:57:29,793 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=60, state=SUCCESS; CreateTableProcedure table=TestAcidGuarantees in 1.1940 sec 2024-12-16T17:57:30,705 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38367 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=60 2024-12-16T17:57:30,705 INFO [Time-limited test {}] client.HBaseAdmin$TableFuture(3751): Operation: CREATE, Table Name: default:TestAcidGuarantees, procId: 60 completed 2024-12-16T17:57:30,707 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x3ec51c1b to 127.0.0.1:49190 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@74f8da64 2024-12-16T17:57:30,741 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@10b37ecb, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-16T17:57:30,742 DEBUG [Time-limited test {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-16T17:57:30,745 INFO [RS-EventLoopGroup-3-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:45432, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-16T17:57:30,747 DEBUG [Time-limited test {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=MasterService, sasl=false 2024-12-16T17:57:30,748 INFO [RS-EventLoopGroup-1-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:34184, 
version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=MasterService 2024-12-16T17:57:30,751 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x65cb9e20 to 127.0.0.1:49190 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@57525307 2024-12-16T17:57:30,764 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@5c1c3a00, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-16T17:57:30,766 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x55db8dd7 to 127.0.0.1:49190 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@10d42600 2024-12-16T17:57:30,782 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@2da5c220, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-16T17:57:30,784 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x094b9cbe to 127.0.0.1:49190 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@389e4fb1 2024-12-16T17:57:30,800 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@3b292e40, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-16T17:57:30,801 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x567fe249 to 127.0.0.1:49190 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@1cd6dcf0 2024-12-16T17:57:30,814 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@6e7fba04, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-16T17:57:30,816 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x651a82a0 to 127.0.0.1:49190 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@4f01e3af 2024-12-16T17:57:30,832 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@5c3ddfd1, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-16T17:57:30,833 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x1359ecb3 to 127.0.0.1:49190 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@21a86a5e 2024-12-16T17:57:30,856 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@1c2a2199, compressor=null, tcpKeepAlive=true, 
tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-16T17:57:30,858 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x3838b5fe to 127.0.0.1:49190 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@5a20c23d 2024-12-16T17:57:30,874 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@174b0609, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-16T17:57:30,876 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x49d7f41f to 127.0.0.1:49190 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@15bae632 2024-12-16T17:57:30,902 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@5a8ed1c2, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-16T17:57:30,903 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x71eeb576 to 127.0.0.1:49190 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@33a7460f 2024-12-16T17:57:30,917 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@1f8b9178, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-16T17:57:30,919 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x62a8cb31 to 127.0.0.1:49190 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@6bb66e55 2024-12-16T17:57:30,933 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@2875220a, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-16T17:57:30,938 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38367 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-12-16T17:57:30,939 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38367 {}] procedure2.ProcedureExecutor(1098): Stored pid=63, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=63, table=TestAcidGuarantees 2024-12-16T17:57:30,941 INFO [PEWorker-5 {}] procedure.FlushTableProcedure(91): pid=63, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=63, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-12-16T17:57:30,941 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38367 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=63 2024-12-16T17:57:30,942 INFO [PEWorker-5 {}] procedure.FlushTableProcedure(91): pid=63, 
state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=63, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-12-16T17:57:30,942 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=64, ppid=63, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-12-16T17:57:30,965 DEBUG [hconnection-0x5a9c0f9b-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-16T17:57:30,965 DEBUG [hconnection-0x3168fae0-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-16T17:57:30,966 INFO [RS-EventLoopGroup-3-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:45446, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-16T17:57:30,967 INFO [RS-EventLoopGroup-3-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:45454, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-16T17:57:30,981 DEBUG [hconnection-0x4e82be12-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-16T17:57:30,984 INFO [RS-EventLoopGroup-3-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:45462, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-16T17:57:30,985 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39733 {}] regionserver.HRegion(8581): Flush requested on 9edd584a765d2a226d81ae3095fa4916 2024-12-16T17:57:30,985 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 9edd584a765d2a226d81ae3095fa4916 3/3 column families, dataSize=53.67 KB heapSize=141.38 KB 2024-12-16T17:57:30,986 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 9edd584a765d2a226d81ae3095fa4916, store=A 2024-12-16T17:57:30,986 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-16T17:57:30,986 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 9edd584a765d2a226d81ae3095fa4916, store=B 2024-12-16T17:57:30,986 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-16T17:57:30,986 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 9edd584a765d2a226d81ae3095fa4916, store=C 2024-12-16T17:57:30,986 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-16T17:57:31,004 DEBUG [hconnection-0xa092da9-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-16T17:57:31,006 INFO [RS-EventLoopGroup-3-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:45470, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-16T17:57:31,012 DEBUG [hconnection-0x1cbac576-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-16T17:57:31,014 INFO [RS-EventLoopGroup-3-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:45484, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-16T17:57:31,019 WARN 
[RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39733 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9edd584a765d2a226d81ae3095fa4916, server=3609ad07831c,39733,1734371789085 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-16T17:57:31,020 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39733 {}] ipc.CallRunner(138): callId: 16 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45454 deadline: 1734371911018, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9edd584a765d2a226d81ae3095fa4916, server=3609ad07831c,39733,1734371789085 2024-12-16T17:57:31,021 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39733 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9edd584a765d2a226d81ae3095fa4916, server=3609ad07831c,39733,1734371789085 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-16T17:57:31,022 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39733 {}] ipc.CallRunner(138): callId: 18 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45446 deadline: 1734371911020, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9edd584a765d2a226d81ae3095fa4916, server=3609ad07831c,39733,1734371789085 2024-12-16T17:57:31,026 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39733 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9edd584a765d2a226d81ae3095fa4916, server=3609ad07831c,39733,1734371789085 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-16T17:57:31,027 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39733 {}] ipc.CallRunner(138): callId: 2 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45484 deadline: 1734371911026, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9edd584a765d2a226d81ae3095fa4916, server=3609ad07831c,39733,1734371789085 2024-12-16T17:57:31,034 DEBUG [hconnection-0x6e15f1f4-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-16T17:57:31,037 INFO [RS-EventLoopGroup-3-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:45486, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-16T17:57:31,039 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39733 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9edd584a765d2a226d81ae3095fa4916, server=3609ad07831c,39733,1734371789085 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-16T17:57:31,041 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39733 {}] ipc.CallRunner(138): callId: 2 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45486 deadline: 1734371911038, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9edd584a765d2a226d81ae3095fa4916, server=3609ad07831c,39733,1734371789085 2024-12-16T17:57:31,043 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38367 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=63 2024-12-16T17:57:31,050 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/9edd584a765d2a226d81ae3095fa4916/.tmp/A/d8c330f2b8b146708621caf329c5f1db is 50, key is test_row_0/A:col10/1734371850983/Put/seqid=0 2024-12-16T17:57:31,051 DEBUG [hconnection-0x638016be-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-16T17:57:31,053 INFO [RS-EventLoopGroup-3-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:45496, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-16T17:57:31,070 DEBUG [hconnection-0x724fba4-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-16T17:57:31,071 INFO [RS-EventLoopGroup-3-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:45506, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-16T17:57:31,094 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 3609ad07831c,39733,1734371789085 2024-12-16T17:57:31,094 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=39733 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=64 2024-12-16T17:57:31,094 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-2 {event_type=RS_FLUSH_REGIONS, pid=64}] regionserver.FlushRegionCallable(51): Starting region 
operation on TestAcidGuarantees,,1734371848597.9edd584a765d2a226d81ae3095fa4916. 2024-12-16T17:57:31,095 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-2 {event_type=RS_FLUSH_REGIONS, pid=64}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1734371848597.9edd584a765d2a226d81ae3095fa4916. as already flushing 2024-12-16T17:57:31,095 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-2 {event_type=RS_FLUSH_REGIONS, pid=64}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1734371848597.9edd584a765d2a226d81ae3095fa4916. 2024-12-16T17:57:31,095 ERROR [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-2 {event_type=RS_FLUSH_REGIONS, pid=64}] handler.RSProcedureHandler(58): pid=64 java.io.IOException: Unable to complete flush {ENCODED => 9edd584a765d2a226d81ae3095fa4916, NAME => 'TestAcidGuarantees,,1734371848597.9edd584a765d2a226d81ae3095fa4916.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-16T17:57:31,095 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-2 {event_type=RS_FLUSH_REGIONS, pid=64}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=64 java.io.IOException: Unable to complete flush {ENCODED => 9edd584a765d2a226d81ae3095fa4916, NAME => 'TestAcidGuarantees,,1734371848597.9edd584a765d2a226d81ae3095fa4916.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
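The warnings in this stretch show writers being pushed back with RegionTooBusyException ("Over memstore limit=512.0 K") while the requested flush cannot start because the region is already flushing. The HBase client treats this exception as retryable, so whether an application sees it directly or wrapped in a retries-exhausted error depends on its client settings. The sketch below assumes it surfaces directly and shows one way to absorb the pushback with application-level backoff; the retry count and sleep values are illustrative, not taken from the test.

import java.io.IOException;
import org.apache.hadoop.hbase.RegionTooBusyException;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.client.Table;

public class BackoffPut {
  // Retries a put with exponential backoff when the region reports memstore
  // pressure, as in the "Over memstore limit" warnings logged above.
  static void putWithBackoff(Table table, Put put, int maxAttempts)
      throws IOException, InterruptedException {
    long sleepMs = 100;
    for (int attempt = 1; ; attempt++) {
      try {
        table.put(put);
        return;
      } catch (RegionTooBusyException e) {
        if (attempt >= maxAttempts) {
          throw e; // give up: the region stayed over its memstore limit
        }
        Thread.sleep(sleepMs);
        sleepMs = Math.min(sleepMs * 2, 5_000); // cap the backoff
      }
    }
  }
}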
2024-12-16T17:57:31,095 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38367 {}] master.HMaster(4114): Remote procedure failed, pid=64 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 9edd584a765d2a226d81ae3095fa4916, NAME => 'TestAcidGuarantees,,1734371848597.9edd584a765d2a226d81ae3095fa4916.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 9edd584a765d2a226d81ae3095fa4916, NAME => 'TestAcidGuarantees,,1734371848597.9edd584a765d2a226d81ae3095fa4916.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-16T17:57:31,106 DEBUG [hconnection-0x366dd741-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-16T17:57:31,108 INFO [RS-EventLoopGroup-3-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:45520, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-16T17:57:31,124 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39733 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9edd584a765d2a226d81ae3095fa4916, server=3609ad07831c,39733,1734371789085 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-16T17:57:31,124 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39733 {}] ipc.CallRunner(138): callId: 18 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45454 deadline: 1734371911122, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9edd584a765d2a226d81ae3095fa4916, server=3609ad07831c,39733,1734371789085 2024-12-16T17:57:31,126 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39733 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9edd584a765d2a226d81ae3095fa4916, server=3609ad07831c,39733,1734371789085 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-16T17:57:31,126 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39733 {}] ipc.CallRunner(138): callId: 20 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45446 deadline: 1734371911123, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9edd584a765d2a226d81ae3095fa4916, server=3609ad07831c,39733,1734371789085 2024-12-16T17:57:31,133 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39733 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9edd584a765d2a226d81ae3095fa4916, server=3609ad07831c,39733,1734371789085 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-16T17:57:31,134 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39733 {}] ipc.CallRunner(138): callId: 4 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45484 deadline: 1734371911128, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9edd584a765d2a226d81ae3095fa4916, server=3609ad07831c,39733,1734371789085 2024-12-16T17:57:31,145 DEBUG [hconnection-0x77e8ff4d-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-16T17:57:31,145 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41817 is added to blk_1073742078_1254 (size=12001) 2024-12-16T17:57:31,146 INFO [RS-EventLoopGroup-3-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:45532, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-16T17:57:31,147 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39733 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9edd584a765d2a226d81ae3095fa4916, server=3609ad07831c,39733,1734371789085 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-16T17:57:31,148 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39733 {}] ipc.CallRunner(138): callId: 4 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45486 deadline: 1734371911143, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9edd584a765d2a226d81ae3095fa4916, server=3609ad07831c,39733,1734371789085 2024-12-16T17:57:31,149 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=17.89 KB at sequenceid=12 (bloomFilter=true), to=hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/9edd584a765d2a226d81ae3095fa4916/.tmp/A/d8c330f2b8b146708621caf329c5f1db 2024-12-16T17:57:31,152 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39733 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9edd584a765d2a226d81ae3095fa4916, server=3609ad07831c,39733,1734371789085 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-16T17:57:31,152 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39733 {}] ipc.CallRunner(138): callId: 2 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45532 deadline: 1734371911150, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9edd584a765d2a226d81ae3095fa4916, server=3609ad07831c,39733,1734371789085 2024-12-16T17:57:31,204 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/9edd584a765d2a226d81ae3095fa4916/.tmp/B/2e84d862af4642caa0fea02e3ed71b5f is 50, key is test_row_0/B:col10/1734371850983/Put/seqid=0 2024-12-16T17:57:31,243 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38367 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=63 2024-12-16T17:57:31,247 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 3609ad07831c,39733,1734371789085 2024-12-16T17:57:31,248 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=39733 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=64 2024-12-16T17:57:31,248 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-0 {event_type=RS_FLUSH_REGIONS, pid=64}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1734371848597.9edd584a765d2a226d81ae3095fa4916. 2024-12-16T17:57:31,248 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-0 {event_type=RS_FLUSH_REGIONS, pid=64}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1734371848597.9edd584a765d2a226d81ae3095fa4916. as already flushing 2024-12-16T17:57:31,248 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-0 {event_type=RS_FLUSH_REGIONS, pid=64}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1734371848597.9edd584a765d2a226d81ae3095fa4916. 2024-12-16T17:57:31,248 ERROR [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-0 {event_type=RS_FLUSH_REGIONS, pid=64}] handler.RSProcedureHandler(58): pid=64 java.io.IOException: Unable to complete flush {ENCODED => 9edd584a765d2a226d81ae3095fa4916, NAME => 'TestAcidGuarantees,,1734371848597.9edd584a765d2a226d81ae3095fa4916.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-16T17:57:31,248 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-0 {event_type=RS_FLUSH_REGIONS, pid=64}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=64 java.io.IOException: Unable to complete flush {ENCODED => 9edd584a765d2a226d81ae3095fa4916, NAME => 'TestAcidGuarantees,,1734371848597.9edd584a765d2a226d81ae3095fa4916.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-16T17:57:31,249 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38367 {}] master.HMaster(4114): Remote procedure failed, pid=64 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 9edd584a765d2a226d81ae3095fa4916, NAME => 'TestAcidGuarantees,,1734371848597.9edd584a765d2a226d81ae3095fa4916.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 9edd584a765d2a226d81ae3095fa4916, NAME => 'TestAcidGuarantees,,1734371848597.9edd584a765d2a226d81ae3095fa4916.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-16T17:57:31,258 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39733 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9edd584a765d2a226d81ae3095fa4916, server=3609ad07831c,39733,1734371789085 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-16T17:57:31,258 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39733 {}] ipc.CallRunner(138): callId: 4 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45532 deadline: 1734371911253, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9edd584a765d2a226d81ae3095fa4916, server=3609ad07831c,39733,1734371789085 2024-12-16T17:57:31,278 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41817 is added to blk_1073742079_1255 (size=12001) 2024-12-16T17:57:31,327 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39733 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9edd584a765d2a226d81ae3095fa4916, server=3609ad07831c,39733,1734371789085 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-16T17:57:31,328 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39733 {}] ipc.CallRunner(138): callId: 20 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45454 deadline: 1734371911326, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9edd584a765d2a226d81ae3095fa4916, server=3609ad07831c,39733,1734371789085 2024-12-16T17:57:31,338 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39733 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9edd584a765d2a226d81ae3095fa4916, server=3609ad07831c,39733,1734371789085 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-16T17:57:31,339 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39733 {}] ipc.CallRunner(138): callId: 6 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45484 deadline: 1734371911336, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9edd584a765d2a226d81ae3095fa4916, server=3609ad07831c,39733,1734371789085 2024-12-16T17:57:31,344 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39733 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9edd584a765d2a226d81ae3095fa4916, server=3609ad07831c,39733,1734371789085 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-16T17:57:31,344 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39733 {}] ipc.CallRunner(138): callId: 22 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45446 deadline: 1734371911343, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9edd584a765d2a226d81ae3095fa4916, server=3609ad07831c,39733,1734371789085 2024-12-16T17:57:31,351 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39733 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9edd584a765d2a226d81ae3095fa4916, server=3609ad07831c,39733,1734371789085 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-16T17:57:31,351 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39733 {}] ipc.CallRunner(138): callId: 6 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45486 deadline: 1734371911350, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9edd584a765d2a226d81ae3095fa4916, server=3609ad07831c,39733,1734371789085 2024-12-16T17:57:31,401 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 3609ad07831c,39733,1734371789085 2024-12-16T17:57:31,401 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=39733 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=64 2024-12-16T17:57:31,402 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-1 {event_type=RS_FLUSH_REGIONS, pid=64}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1734371848597.9edd584a765d2a226d81ae3095fa4916. 2024-12-16T17:57:31,402 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-1 {event_type=RS_FLUSH_REGIONS, pid=64}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1734371848597.9edd584a765d2a226d81ae3095fa4916. as already flushing 2024-12-16T17:57:31,402 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-1 {event_type=RS_FLUSH_REGIONS, pid=64}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1734371848597.9edd584a765d2a226d81ae3095fa4916. 2024-12-16T17:57:31,402 ERROR [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-1 {event_type=RS_FLUSH_REGIONS, pid=64}] handler.RSProcedureHandler(58): pid=64 java.io.IOException: Unable to complete flush {ENCODED => 9edd584a765d2a226d81ae3095fa4916, NAME => 'TestAcidGuarantees,,1734371848597.9edd584a765d2a226d81ae3095fa4916.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
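Editor's note, not part of the captured log: the repeated RegionTooBusyException entries above are what the test's client connections see on their Mutate RPCs — HRegion.checkResources rejects the write once the region's memstore passes the 512.0 K blocking limit and CallRunner reports the exception back to the caller. As a rough illustration only, a caller using the standard HBase client could wrap its put in a bounded retry like the sketch below. The table, row and column names are taken from the log; the connection setup, attempt count and backoff are assumptions, and in practice the client normally retries this exception internally and may surface it wrapped in a retries-exhausted error rather than directly.

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.RegionTooBusyException;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;
    import org.apache.hadoop.hbase.client.Put;
    import org.apache.hadoop.hbase.client.Table;
    import org.apache.hadoop.hbase.util.Bytes;

    public class BusyRegionPutSketch {
      public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();
        try (Connection conn = ConnectionFactory.createConnection(conf);
             Table table = conn.getTable(TableName.valueOf("TestAcidGuarantees"))) {
          Put put = new Put(Bytes.toBytes("test_row_0"))
              .addColumn(Bytes.toBytes("A"), Bytes.toBytes("col10"), Bytes.toBytes("value"));
          // Bounded retry with crude linear backoff while the flush catches up.
          // maxAttempts and the sleep are illustrative values, not from the test.
          int maxAttempts = 5;
          for (int attempt = 1; attempt <= maxAttempts; attempt++) {
            try {
              table.put(put);
              break; // write accepted
            } catch (RegionTooBusyException e) {
              // Simplification: the exception may arrive wrapped after the client's
              // own retries are exhausted rather than as this exact type.
              if (attempt == maxAttempts) {
                throw e;
              }
              Thread.sleep(100L * attempt);
            }
          }
        }
      }
    }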
2024-12-16T17:57:31,402 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-1 {event_type=RS_FLUSH_REGIONS, pid=64}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=64 java.io.IOException: Unable to complete flush {ENCODED => 9edd584a765d2a226d81ae3095fa4916, NAME => 'TestAcidGuarantees,,1734371848597.9edd584a765d2a226d81ae3095fa4916.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-16T17:57:31,403 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38367 {}] master.HMaster(4114): Remote procedure failed, pid=64 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 9edd584a765d2a226d81ae3095fa4916, NAME => 'TestAcidGuarantees,,1734371848597.9edd584a765d2a226d81ae3095fa4916.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 9edd584a765d2a226d81ae3095fa4916, NAME => 'TestAcidGuarantees,,1734371848597.9edd584a765d2a226d81ae3095fa4916.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-16T17:57:31,463 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39733 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9edd584a765d2a226d81ae3095fa4916, server=3609ad07831c,39733,1734371789085 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-16T17:57:31,463 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39733 {}] ipc.CallRunner(138): callId: 6 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45532 deadline: 1734371911460, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9edd584a765d2a226d81ae3095fa4916, server=3609ad07831c,39733,1734371789085 2024-12-16T17:57:31,544 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38367 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=63 2024-12-16T17:57:31,555 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 3609ad07831c,39733,1734371789085 2024-12-16T17:57:31,555 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=39733 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=64 2024-12-16T17:57:31,555 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-2 {event_type=RS_FLUSH_REGIONS, pid=64}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1734371848597.9edd584a765d2a226d81ae3095fa4916. 2024-12-16T17:57:31,555 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-2 {event_type=RS_FLUSH_REGIONS, pid=64}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1734371848597.9edd584a765d2a226d81ae3095fa4916. as already flushing 2024-12-16T17:57:31,556 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-2 {event_type=RS_FLUSH_REGIONS, pid=64}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1734371848597.9edd584a765d2a226d81ae3095fa4916. 
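Editor's note, not part of the captured log: the 512.0 K figure quoted in every rejection is the region's blocking memstore size, which, as the server-side check in HRegion.checkResources is normally configured, is the per-region flush size multiplied by hbase.hregion.memstore.block.multiplier; the test evidently runs with a very small flush size so the limit trips under write load. A minimal sketch of the two properties involved is below, assuming a 128 KB flush size and the default multiplier of 4 purely so the product matches the logged limit — the test's actual values are not visible in this log.

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;

    public class MemstoreLimitSketch {
      public static void main(String[] args) {
        Configuration conf = HBaseConfiguration.create();
        // Per-region flush threshold; 128 KB is assumed here only so that
        // flush.size * block.multiplier = 512 KB, matching "Over memstore limit=512.0 K".
        conf.setLong("hbase.hregion.memstore.flush.size", 128 * 1024L);
        // Writes are rejected with RegionTooBusyException once the memstore reaches
        // flush.size * block.multiplier; 4 is the usual default multiplier.
        conf.setInt("hbase.hregion.memstore.block.multiplier", 4);
        long blocking = conf.getLong("hbase.hregion.memstore.flush.size", 0)
            * conf.getInt("hbase.hregion.memstore.block.multiplier", 4);
        System.out.println("blocking memstore size = " + (blocking / 1024) + " K");
      }
    }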
2024-12-16T17:57:31,556 ERROR [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-2 {event_type=RS_FLUSH_REGIONS, pid=64}] handler.RSProcedureHandler(58): pid=64 java.io.IOException: Unable to complete flush {ENCODED => 9edd584a765d2a226d81ae3095fa4916, NAME => 'TestAcidGuarantees,,1734371848597.9edd584a765d2a226d81ae3095fa4916.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-16T17:57:31,556 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-2 {event_type=RS_FLUSH_REGIONS, pid=64}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=64 java.io.IOException: Unable to complete flush {ENCODED => 9edd584a765d2a226d81ae3095fa4916, NAME => 'TestAcidGuarantees,,1734371848597.9edd584a765d2a226d81ae3095fa4916.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-16T17:57:31,556 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38367 {}] master.HMaster(4114): Remote procedure failed, pid=64 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 9edd584a765d2a226d81ae3095fa4916, NAME => 'TestAcidGuarantees,,1734371848597.9edd584a765d2a226d81ae3095fa4916.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 9edd584a765d2a226d81ae3095fa4916, NAME => 'TestAcidGuarantees,,1734371848597.9edd584a765d2a226d81ae3095fa4916.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-16T17:57:31,632 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39733 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9edd584a765d2a226d81ae3095fa4916, server=3609ad07831c,39733,1734371789085 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-16T17:57:31,633 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39733 {}] ipc.CallRunner(138): callId: 22 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45454 deadline: 1734371911631, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9edd584a765d2a226d81ae3095fa4916, server=3609ad07831c,39733,1734371789085 2024-12-16T17:57:31,641 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39733 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9edd584a765d2a226d81ae3095fa4916, server=3609ad07831c,39733,1734371789085 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-16T17:57:31,642 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39733 {}] ipc.CallRunner(138): callId: 8 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45484 deadline: 1734371911641, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9edd584a765d2a226d81ae3095fa4916, server=3609ad07831c,39733,1734371789085 2024-12-16T17:57:31,648 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39733 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9edd584a765d2a226d81ae3095fa4916, server=3609ad07831c,39733,1734371789085 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-16T17:57:31,648 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39733 {}] ipc.CallRunner(138): callId: 24 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45446 deadline: 1734371911646, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9edd584a765d2a226d81ae3095fa4916, server=3609ad07831c,39733,1734371789085 2024-12-16T17:57:31,654 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39733 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9edd584a765d2a226d81ae3095fa4916, server=3609ad07831c,39733,1734371789085 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-16T17:57:31,654 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39733 {}] ipc.CallRunner(138): callId: 8 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45486 deadline: 1734371911653, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9edd584a765d2a226d81ae3095fa4916, server=3609ad07831c,39733,1734371789085 2024-12-16T17:57:31,679 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=17.89 KB at sequenceid=12 (bloomFilter=true), to=hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/9edd584a765d2a226d81ae3095fa4916/.tmp/B/2e84d862af4642caa0fea02e3ed71b5f 2024-12-16T17:57:31,715 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 3609ad07831c,39733,1734371789085 2024-12-16T17:57:31,716 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=39733 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=64 2024-12-16T17:57:31,716 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-0 {event_type=RS_FLUSH_REGIONS, pid=64}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1734371848597.9edd584a765d2a226d81ae3095fa4916. 2024-12-16T17:57:31,716 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-0 {event_type=RS_FLUSH_REGIONS, pid=64}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1734371848597.9edd584a765d2a226d81ae3095fa4916. as already flushing 2024-12-16T17:57:31,716 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-0 {event_type=RS_FLUSH_REGIONS, pid=64}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1734371848597.9edd584a765d2a226d81ae3095fa4916. 2024-12-16T17:57:31,717 ERROR [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-0 {event_type=RS_FLUSH_REGIONS, pid=64}] handler.RSProcedureHandler(58): pid=64 java.io.IOException: Unable to complete flush {ENCODED => 9edd584a765d2a226d81ae3095fa4916, NAME => 'TestAcidGuarantees,,1734371848597.9edd584a765d2a226d81ae3095fa4916.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] 
at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-16T17:57:31,717 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-0 {event_type=RS_FLUSH_REGIONS, pid=64}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=64 java.io.IOException: Unable to complete flush {ENCODED => 9edd584a765d2a226d81ae3095fa4916, NAME => 'TestAcidGuarantees,,1734371848597.9edd584a765d2a226d81ae3095fa4916.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-16T17:57:31,717 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38367 {}] master.HMaster(4114): Remote procedure failed, pid=64 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 9edd584a765d2a226d81ae3095fa4916, NAME => 'TestAcidGuarantees,,1734371848597.9edd584a765d2a226d81ae3095fa4916.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 9edd584a765d2a226d81ae3095fa4916, NAME => 'TestAcidGuarantees,,1734371848597.9edd584a765d2a226d81ae3095fa4916.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-16T17:57:31,722 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/9edd584a765d2a226d81ae3095fa4916/.tmp/C/b22197d3e7aa43c787bfe959c47580bf is 50, key is test_row_0/C:col10/1734371850983/Put/seqid=0 2024-12-16T17:57:31,764 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41817 is added to blk_1073742080_1256 (size=12001) 2024-12-16T17:57:31,768 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39733 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9edd584a765d2a226d81ae3095fa4916, server=3609ad07831c,39733,1734371789085 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-16T17:57:31,768 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39733 {}] ipc.CallRunner(138): callId: 8 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45532 deadline: 1734371911766, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9edd584a765d2a226d81ae3095fa4916, server=3609ad07831c,39733,1734371789085 2024-12-16T17:57:31,869 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 3609ad07831c,39733,1734371789085 2024-12-16T17:57:31,870 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=39733 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=64 2024-12-16T17:57:31,870 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-1 {event_type=RS_FLUSH_REGIONS, pid=64}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1734371848597.9edd584a765d2a226d81ae3095fa4916. 2024-12-16T17:57:31,870 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-1 {event_type=RS_FLUSH_REGIONS, pid=64}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1734371848597.9edd584a765d2a226d81ae3095fa4916. as already flushing 2024-12-16T17:57:31,870 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-1 {event_type=RS_FLUSH_REGIONS, pid=64}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1734371848597.9edd584a765d2a226d81ae3095fa4916. 2024-12-16T17:57:31,870 ERROR [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-1 {event_type=RS_FLUSH_REGIONS, pid=64}] handler.RSProcedureHandler(58): pid=64 java.io.IOException: Unable to complete flush {ENCODED => 9edd584a765d2a226d81ae3095fa4916, NAME => 'TestAcidGuarantees,,1734371848597.9edd584a765d2a226d81ae3095fa4916.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
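Editor's note, not part of the captured log: the pattern above — the master polling "Checking to see if procedure is done pid=63" while the region server keeps rejecting FlushRegionCallable (pid=64) with "already flushing" — is what a table flush request looks like from the inside while MemStoreFlusher.0 is still writing the previous flush's store files. Presumably the test harness issued an admin flush along these lines; the exact call is not in the log, so the sketch below is only a hedged guess using the standard Admin API, with the table name taken from the log.

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;

    public class FlushRequestSketch {
      public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();
        try (Connection conn = ConnectionFactory.createConnection(conf);
             Admin admin = conn.getAdmin()) {
          // Synchronous from the caller's point of view: the master runs a flush
          // procedure and re-dispatches FlushRegionCallable to the region server
          // until the region can actually flush (it is rejected while a flush is
          // already in progress, as in the repeated pid=64 attempts above).
          admin.flush(TableName.valueOf("TestAcidGuarantees"));
        }
      }
    }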
2024-12-16T17:57:31,871 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-1 {event_type=RS_FLUSH_REGIONS, pid=64}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=64 java.io.IOException: Unable to complete flush {ENCODED => 9edd584a765d2a226d81ae3095fa4916, NAME => 'TestAcidGuarantees,,1734371848597.9edd584a765d2a226d81ae3095fa4916.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-16T17:57:31,872 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38367 {}] master.HMaster(4114): Remote procedure failed, pid=64 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 9edd584a765d2a226d81ae3095fa4916, NAME => 'TestAcidGuarantees,,1734371848597.9edd584a765d2a226d81ae3095fa4916.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 9edd584a765d2a226d81ae3095fa4916, NAME => 'TestAcidGuarantees,,1734371848597.9edd584a765d2a226d81ae3095fa4916.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-16T17:57:32,025 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 3609ad07831c,39733,1734371789085 2024-12-16T17:57:32,026 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=39733 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=64 2024-12-16T17:57:32,027 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-2 {event_type=RS_FLUSH_REGIONS, pid=64}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1734371848597.9edd584a765d2a226d81ae3095fa4916. 2024-12-16T17:57:32,028 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-2 {event_type=RS_FLUSH_REGIONS, pid=64}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1734371848597.9edd584a765d2a226d81ae3095fa4916. as already flushing 2024-12-16T17:57:32,028 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-2 {event_type=RS_FLUSH_REGIONS, pid=64}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1734371848597.9edd584a765d2a226d81ae3095fa4916. 2024-12-16T17:57:32,028 ERROR [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-2 {event_type=RS_FLUSH_REGIONS, pid=64}] handler.RSProcedureHandler(58): pid=64 java.io.IOException: Unable to complete flush {ENCODED => 9edd584a765d2a226d81ae3095fa4916, NAME => 'TestAcidGuarantees,,1734371848597.9edd584a765d2a226d81ae3095fa4916.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-16T17:57:32,028 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-2 {event_type=RS_FLUSH_REGIONS, pid=64}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=64 java.io.IOException: Unable to complete flush {ENCODED => 9edd584a765d2a226d81ae3095fa4916, NAME => 'TestAcidGuarantees,,1734371848597.9edd584a765d2a226d81ae3095fa4916.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-16T17:57:32,029 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38367 {}] master.HMaster(4114): Remote procedure failed, pid=64 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 9edd584a765d2a226d81ae3095fa4916, NAME => 'TestAcidGuarantees,,1734371848597.9edd584a765d2a226d81ae3095fa4916.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 9edd584a765d2a226d81ae3095fa4916, NAME => 'TestAcidGuarantees,,1734371848597.9edd584a765d2a226d81ae3095fa4916.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-16T17:57:32,045 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38367 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=63 2024-12-16T17:57:32,138 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39733 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9edd584a765d2a226d81ae3095fa4916, server=3609ad07831c,39733,1734371789085 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-16T17:57:32,138 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39733 {}] ipc.CallRunner(138): callId: 24 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45454 deadline: 1734371912135, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9edd584a765d2a226d81ae3095fa4916, server=3609ad07831c,39733,1734371789085 2024-12-16T17:57:32,149 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39733 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9edd584a765d2a226d81ae3095fa4916, server=3609ad07831c,39733,1734371789085 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-16T17:57:32,149 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39733 {}] ipc.CallRunner(138): callId: 10 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45484 deadline: 1734371912147, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9edd584a765d2a226d81ae3095fa4916, server=3609ad07831c,39733,1734371789085 2024-12-16T17:57:32,155 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39733 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9edd584a765d2a226d81ae3095fa4916, server=3609ad07831c,39733,1734371789085 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-16T17:57:32,156 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39733 {}] ipc.CallRunner(138): callId: 26 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45446 deadline: 1734371912153, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9edd584a765d2a226d81ae3095fa4916, server=3609ad07831c,39733,1734371789085 2024-12-16T17:57:32,158 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39733 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9edd584a765d2a226d81ae3095fa4916, server=3609ad07831c,39733,1734371789085 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-16T17:57:32,158 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39733 {}] ipc.CallRunner(138): callId: 10 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45486 deadline: 1734371912155, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9edd584a765d2a226d81ae3095fa4916, server=3609ad07831c,39733,1734371789085 2024-12-16T17:57:32,165 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=17.89 KB at sequenceid=12 (bloomFilter=true), to=hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/9edd584a765d2a226d81ae3095fa4916/.tmp/C/b22197d3e7aa43c787bfe959c47580bf 2024-12-16T17:57:32,180 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/9edd584a765d2a226d81ae3095fa4916/.tmp/A/d8c330f2b8b146708621caf329c5f1db as hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/9edd584a765d2a226d81ae3095fa4916/A/d8c330f2b8b146708621caf329c5f1db 2024-12-16T17:57:32,180 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 3609ad07831c,39733,1734371789085 2024-12-16T17:57:32,181 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=39733 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=64 2024-12-16T17:57:32,181 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-0 {event_type=RS_FLUSH_REGIONS, pid=64}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1734371848597.9edd584a765d2a226d81ae3095fa4916. 2024-12-16T17:57:32,181 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-0 {event_type=RS_FLUSH_REGIONS, pid=64}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1734371848597.9edd584a765d2a226d81ae3095fa4916. as already flushing 2024-12-16T17:57:32,181 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-0 {event_type=RS_FLUSH_REGIONS, pid=64}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1734371848597.9edd584a765d2a226d81ae3095fa4916. 2024-12-16T17:57:32,181 ERROR [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-0 {event_type=RS_FLUSH_REGIONS, pid=64}] handler.RSProcedureHandler(58): pid=64 java.io.IOException: Unable to complete flush {ENCODED => 9edd584a765d2a226d81ae3095fa4916, NAME => 'TestAcidGuarantees,,1734371848597.9edd584a765d2a226d81ae3095fa4916.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-16T17:57:32,181 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-0 {event_type=RS_FLUSH_REGIONS, pid=64}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=64 java.io.IOException: Unable to complete flush {ENCODED => 9edd584a765d2a226d81ae3095fa4916, NAME => 'TestAcidGuarantees,,1734371848597.9edd584a765d2a226d81ae3095fa4916.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-16T17:57:32,182 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38367 {}] master.HMaster(4114): Remote procedure failed, pid=64 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 9edd584a765d2a226d81ae3095fa4916, NAME => 'TestAcidGuarantees,,1734371848597.9edd584a765d2a226d81ae3095fa4916.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 9edd584a765d2a226d81ae3095fa4916, NAME => 'TestAcidGuarantees,,1734371848597.9edd584a765d2a226d81ae3095fa4916.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-16T17:57:32,186 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/9edd584a765d2a226d81ae3095fa4916/A/d8c330f2b8b146708621caf329c5f1db, entries=150, sequenceid=12, filesize=11.7 K 2024-12-16T17:57:32,188 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/9edd584a765d2a226d81ae3095fa4916/.tmp/B/2e84d862af4642caa0fea02e3ed71b5f as hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/9edd584a765d2a226d81ae3095fa4916/B/2e84d862af4642caa0fea02e3ed71b5f 2024-12-16T17:57:32,194 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/9edd584a765d2a226d81ae3095fa4916/B/2e84d862af4642caa0fea02e3ed71b5f, entries=150, sequenceid=12, filesize=11.7 K 2024-12-16T17:57:32,195 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/9edd584a765d2a226d81ae3095fa4916/.tmp/C/b22197d3e7aa43c787bfe959c47580bf as hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/9edd584a765d2a226d81ae3095fa4916/C/b22197d3e7aa43c787bfe959c47580bf 2024-12-16T17:57:32,202 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/9edd584a765d2a226d81ae3095fa4916/C/b22197d3e7aa43c787bfe959c47580bf, entries=150, sequenceid=12, filesize=11.7 K 
2024-12-16T17:57:32,204 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~53.67 KB/54960, heapSize ~141.33 KB/144720, currentSize=147.60 KB/151140 for 9edd584a765d2a226d81ae3095fa4916 in 1219ms, sequenceid=12, compaction requested=false 2024-12-16T17:57:32,205 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 9edd584a765d2a226d81ae3095fa4916: 2024-12-16T17:57:32,275 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39733 {}] regionserver.HRegion(8581): Flush requested on 9edd584a765d2a226d81ae3095fa4916 2024-12-16T17:57:32,275 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 9edd584a765d2a226d81ae3095fa4916 3/3 column families, dataSize=154.31 KB heapSize=405.05 KB 2024-12-16T17:57:32,275 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 9edd584a765d2a226d81ae3095fa4916, store=A 2024-12-16T17:57:32,275 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-16T17:57:32,275 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 9edd584a765d2a226d81ae3095fa4916, store=B 2024-12-16T17:57:32,275 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-16T17:57:32,275 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 9edd584a765d2a226d81ae3095fa4916, store=C 2024-12-16T17:57:32,275 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-16T17:57:32,286 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/9edd584a765d2a226d81ae3095fa4916/.tmp/A/aebe984fc5ec4a5fbbd4060edacc4bdc is 50, key is test_row_0/A:col10/1734371851016/Put/seqid=0 2024-12-16T17:57:32,327 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41817 is added to blk_1073742081_1257 (size=12001) 2024-12-16T17:57:32,327 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=51.44 KB at sequenceid=38 (bloomFilter=true), to=hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/9edd584a765d2a226d81ae3095fa4916/.tmp/A/aebe984fc5ec4a5fbbd4060edacc4bdc 2024-12-16T17:57:32,334 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 3609ad07831c,39733,1734371789085 2024-12-16T17:57:32,335 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=39733 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=64 2024-12-16T17:57:32,335 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-1 {event_type=RS_FLUSH_REGIONS, pid=64}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1734371848597.9edd584a765d2a226d81ae3095fa4916. 2024-12-16T17:57:32,335 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-1 {event_type=RS_FLUSH_REGIONS, pid=64}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1734371848597.9edd584a765d2a226d81ae3095fa4916. 
as already flushing 2024-12-16T17:57:32,335 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-1 {event_type=RS_FLUSH_REGIONS, pid=64}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1734371848597.9edd584a765d2a226d81ae3095fa4916. 2024-12-16T17:57:32,335 ERROR [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-1 {event_type=RS_FLUSH_REGIONS, pid=64}] handler.RSProcedureHandler(58): pid=64 java.io.IOException: Unable to complete flush {ENCODED => 9edd584a765d2a226d81ae3095fa4916, NAME => 'TestAcidGuarantees,,1734371848597.9edd584a765d2a226d81ae3095fa4916.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-16T17:57:32,335 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-1 {event_type=RS_FLUSH_REGIONS, pid=64}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=64 java.io.IOException: Unable to complete flush {ENCODED => 9edd584a765d2a226d81ae3095fa4916, NAME => 'TestAcidGuarantees,,1734371848597.9edd584a765d2a226d81ae3095fa4916.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-16T17:57:32,336 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38367 {}] master.HMaster(4114): Remote procedure failed, pid=64 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 9edd584a765d2a226d81ae3095fa4916, NAME => 'TestAcidGuarantees,,1734371848597.9edd584a765d2a226d81ae3095fa4916.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] 
at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 9edd584a765d2a226d81ae3095fa4916, NAME => 'TestAcidGuarantees,,1734371848597.9edd584a765d2a226d81ae3095fa4916.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-16T17:57:32,342 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39733 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9edd584a765d2a226d81ae3095fa4916, server=3609ad07831c,39733,1734371789085 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-16T17:57:32,343 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39733 {}] ipc.CallRunner(138): callId: 18 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45532 deadline: 1734371912340, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9edd584a765d2a226d81ae3095fa4916, server=3609ad07831c,39733,1734371789085 2024-12-16T17:57:32,350 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/9edd584a765d2a226d81ae3095fa4916/.tmp/B/e3db51af415c4c82a3b564f36ed6fc52 is 50, key is test_row_0/B:col10/1734371851016/Put/seqid=0 2024-12-16T17:57:32,388 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41817 is added to blk_1073742082_1258 (size=12001) 2024-12-16T17:57:32,389 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=51.44 KB at sequenceid=38 (bloomFilter=true), to=hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/9edd584a765d2a226d81ae3095fa4916/.tmp/B/e3db51af415c4c82a3b564f36ed6fc52 2024-12-16T17:57:32,410 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/9edd584a765d2a226d81ae3095fa4916/.tmp/C/ee7ec8f49a99418e9772925db6304cc2 is 50, key is test_row_0/C:col10/1734371851016/Put/seqid=0 2024-12-16T17:57:32,444 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41817 is added to blk_1073742083_1259 (size=12001) 2024-12-16T17:57:32,446 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39733 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9edd584a765d2a226d81ae3095fa4916, server=3609ad07831c,39733,1734371789085 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-16T17:57:32,446 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39733 {}] ipc.CallRunner(138): callId: 20 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45532 deadline: 1734371912444, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9edd584a765d2a226d81ae3095fa4916, server=3609ad07831c,39733,1734371789085 2024-12-16T17:57:32,488 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 3609ad07831c,39733,1734371789085 2024-12-16T17:57:32,489 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=39733 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=64 2024-12-16T17:57:32,489 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-2 {event_type=RS_FLUSH_REGIONS, pid=64}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1734371848597.9edd584a765d2a226d81ae3095fa4916. 2024-12-16T17:57:32,489 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-2 {event_type=RS_FLUSH_REGIONS, pid=64}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1734371848597.9edd584a765d2a226d81ae3095fa4916. as already flushing 2024-12-16T17:57:32,489 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-2 {event_type=RS_FLUSH_REGIONS, pid=64}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1734371848597.9edd584a765d2a226d81ae3095fa4916. 2024-12-16T17:57:32,489 ERROR [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-2 {event_type=RS_FLUSH_REGIONS, pid=64}] handler.RSProcedureHandler(58): pid=64 java.io.IOException: Unable to complete flush {ENCODED => 9edd584a765d2a226d81ae3095fa4916, NAME => 'TestAcidGuarantees,,1734371848597.9edd584a765d2a226d81ae3095fa4916.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-16T17:57:32,490 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-2 {event_type=RS_FLUSH_REGIONS, pid=64}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=64 java.io.IOException: Unable to complete flush {ENCODED => 9edd584a765d2a226d81ae3095fa4916, NAME => 'TestAcidGuarantees,,1734371848597.9edd584a765d2a226d81ae3095fa4916.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-16T17:57:32,490 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38367 {}] master.HMaster(4114): Remote procedure failed, pid=64 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 9edd584a765d2a226d81ae3095fa4916, NAME => 'TestAcidGuarantees,,1734371848597.9edd584a765d2a226d81ae3095fa4916.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 9edd584a765d2a226d81ae3095fa4916, NAME => 'TestAcidGuarantees,,1734371848597.9edd584a765d2a226d81ae3095fa4916.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-16T17:57:32,642 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 3609ad07831c,39733,1734371789085 2024-12-16T17:57:32,642 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=39733 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=64 2024-12-16T17:57:32,642 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-0 {event_type=RS_FLUSH_REGIONS, pid=64}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1734371848597.9edd584a765d2a226d81ae3095fa4916. 2024-12-16T17:57:32,643 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-0 {event_type=RS_FLUSH_REGIONS, pid=64}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1734371848597.9edd584a765d2a226d81ae3095fa4916. as already flushing 2024-12-16T17:57:32,643 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-0 {event_type=RS_FLUSH_REGIONS, pid=64}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1734371848597.9edd584a765d2a226d81ae3095fa4916. 2024-12-16T17:57:32,643 ERROR [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-0 {event_type=RS_FLUSH_REGIONS, pid=64}] handler.RSProcedureHandler(58): pid=64 java.io.IOException: Unable to complete flush {ENCODED => 9edd584a765d2a226d81ae3095fa4916, NAME => 'TestAcidGuarantees,,1734371848597.9edd584a765d2a226d81ae3095fa4916.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-16T17:57:32,643 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-0 {event_type=RS_FLUSH_REGIONS, pid=64}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=64 java.io.IOException: Unable to complete flush {ENCODED => 9edd584a765d2a226d81ae3095fa4916, NAME => 'TestAcidGuarantees,,1734371848597.9edd584a765d2a226d81ae3095fa4916.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-16T17:57:32,643 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38367 {}] master.HMaster(4114): Remote procedure failed, pid=64 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 9edd584a765d2a226d81ae3095fa4916, NAME => 'TestAcidGuarantees,,1734371848597.9edd584a765d2a226d81ae3095fa4916.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 9edd584a765d2a226d81ae3095fa4916, NAME => 'TestAcidGuarantees,,1734371848597.9edd584a765d2a226d81ae3095fa4916.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-16T17:57:32,651 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39733 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9edd584a765d2a226d81ae3095fa4916, server=3609ad07831c,39733,1734371789085 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-16T17:57:32,651 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39733 {}] ipc.CallRunner(138): callId: 22 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45532 deadline: 1734371912649, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9edd584a765d2a226d81ae3095fa4916, server=3609ad07831c,39733,1734371789085 2024-12-16T17:57:32,795 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 3609ad07831c,39733,1734371789085 2024-12-16T17:57:32,795 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=39733 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=64 2024-12-16T17:57:32,796 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-1 {event_type=RS_FLUSH_REGIONS, pid=64}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1734371848597.9edd584a765d2a226d81ae3095fa4916. 2024-12-16T17:57:32,796 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-1 {event_type=RS_FLUSH_REGIONS, pid=64}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1734371848597.9edd584a765d2a226d81ae3095fa4916. as already flushing 2024-12-16T17:57:32,796 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-1 {event_type=RS_FLUSH_REGIONS, pid=64}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1734371848597.9edd584a765d2a226d81ae3095fa4916. 2024-12-16T17:57:32,796 ERROR [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-1 {event_type=RS_FLUSH_REGIONS, pid=64}] handler.RSProcedureHandler(58): pid=64 java.io.IOException: Unable to complete flush {ENCODED => 9edd584a765d2a226d81ae3095fa4916, NAME => 'TestAcidGuarantees,,1734371848597.9edd584a765d2a226d81ae3095fa4916.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-16T17:57:32,796 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-1 {event_type=RS_FLUSH_REGIONS, pid=64}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=64 java.io.IOException: Unable to complete flush {ENCODED => 9edd584a765d2a226d81ae3095fa4916, NAME => 'TestAcidGuarantees,,1734371848597.9edd584a765d2a226d81ae3095fa4916.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-16T17:57:32,796 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38367 {}] master.HMaster(4114): Remote procedure failed, pid=64 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 9edd584a765d2a226d81ae3095fa4916, NAME => 'TestAcidGuarantees,,1734371848597.9edd584a765d2a226d81ae3095fa4916.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 9edd584a765d2a226d81ae3095fa4916, NAME => 'TestAcidGuarantees,,1734371848597.9edd584a765d2a226d81ae3095fa4916.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-16T17:57:32,845 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=51.44 KB at sequenceid=38 (bloomFilter=true), to=hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/9edd584a765d2a226d81ae3095fa4916/.tmp/C/ee7ec8f49a99418e9772925db6304cc2 2024-12-16T17:57:32,850 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/9edd584a765d2a226d81ae3095fa4916/.tmp/A/aebe984fc5ec4a5fbbd4060edacc4bdc as hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/9edd584a765d2a226d81ae3095fa4916/A/aebe984fc5ec4a5fbbd4060edacc4bdc 2024-12-16T17:57:32,855 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/9edd584a765d2a226d81ae3095fa4916/A/aebe984fc5ec4a5fbbd4060edacc4bdc, entries=150, sequenceid=38, filesize=11.7 K 2024-12-16T17:57:32,856 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/9edd584a765d2a226d81ae3095fa4916/.tmp/B/e3db51af415c4c82a3b564f36ed6fc52 as hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/9edd584a765d2a226d81ae3095fa4916/B/e3db51af415c4c82a3b564f36ed6fc52 2024-12-16T17:57:32,860 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/9edd584a765d2a226d81ae3095fa4916/B/e3db51af415c4c82a3b564f36ed6fc52, entries=150, sequenceid=38, filesize=11.7 K 2024-12-16T17:57:32,861 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/9edd584a765d2a226d81ae3095fa4916/.tmp/C/ee7ec8f49a99418e9772925db6304cc2 as hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/9edd584a765d2a226d81ae3095fa4916/C/ee7ec8f49a99418e9772925db6304cc2 2024-12-16T17:57:32,866 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/9edd584a765d2a226d81ae3095fa4916/C/ee7ec8f49a99418e9772925db6304cc2, entries=150, sequenceid=38, filesize=11.7 K 2024-12-16T17:57:32,867 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~154.31 KB/158010, heapSize ~405 KB/414720, currentSize=46.96 KB/48090 for 9edd584a765d2a226d81ae3095fa4916 in 592ms, sequenceid=38, compaction requested=false 2024-12-16T17:57:32,867 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 9edd584a765d2a226d81ae3095fa4916: 2024-12-16T17:57:32,947 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 3609ad07831c,39733,1734371789085 
2024-12-16T17:57:32,948 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=39733 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=64 2024-12-16T17:57:32,948 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-2 {event_type=RS_FLUSH_REGIONS, pid=64}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1734371848597.9edd584a765d2a226d81ae3095fa4916. 2024-12-16T17:57:32,948 INFO [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-2 {event_type=RS_FLUSH_REGIONS, pid=64}] regionserver.HRegion(2837): Flushing 9edd584a765d2a226d81ae3095fa4916 3/3 column families, dataSize=46.96 KB heapSize=123.80 KB 2024-12-16T17:57:32,948 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-2 {event_type=RS_FLUSH_REGIONS, pid=64}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 9edd584a765d2a226d81ae3095fa4916, store=A 2024-12-16T17:57:32,949 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-2 {event_type=RS_FLUSH_REGIONS, pid=64}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-16T17:57:32,949 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-2 {event_type=RS_FLUSH_REGIONS, pid=64}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 9edd584a765d2a226d81ae3095fa4916, store=B 2024-12-16T17:57:32,949 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-2 {event_type=RS_FLUSH_REGIONS, pid=64}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-16T17:57:32,949 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-2 {event_type=RS_FLUSH_REGIONS, pid=64}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 9edd584a765d2a226d81ae3095fa4916, store=C 2024-12-16T17:57:32,949 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-2 {event_type=RS_FLUSH_REGIONS, pid=64}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-16T17:57:32,954 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1734371848597.9edd584a765d2a226d81ae3095fa4916. as already flushing 2024-12-16T17:57:32,954 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39733 {}] regionserver.HRegion(8581): Flush requested on 9edd584a765d2a226d81ae3095fa4916 2024-12-16T17:57:32,963 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-2 {event_type=RS_FLUSH_REGIONS, pid=64}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/9edd584a765d2a226d81ae3095fa4916/.tmp/A/cb3769fb7a554a47ad0a6ee7bb9aa3dd is 50, key is test_row_0/A:col10/1734371852335/Put/seqid=0 2024-12-16T17:57:32,986 WARN [HBase-Metrics2-1 {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-hbase.properties,hadoop-metrics2.properties 2024-12-16T17:57:33,008 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41817 is added to blk_1073742084_1260 (size=9657) 2024-12-16T17:57:33,046 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38367 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=63 2024-12-16T17:57:33,064 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39733 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9edd584a765d2a226d81ae3095fa4916, server=3609ad07831c,39733,1734371789085 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-16T17:57:33,065 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39733 {}] ipc.CallRunner(138): callId: 47 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45532 deadline: 1734371913060, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9edd584a765d2a226d81ae3095fa4916, server=3609ad07831c,39733,1734371789085 2024-12-16T17:57:33,148 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39733 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9edd584a765d2a226d81ae3095fa4916, server=3609ad07831c,39733,1734371789085 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-16T17:57:33,149 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39733 {}] ipc.CallRunner(138): callId: 26 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45454 deadline: 1734371913146, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9edd584a765d2a226d81ae3095fa4916, server=3609ad07831c,39733,1734371789085 2024-12-16T17:57:33,159 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39733 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9edd584a765d2a226d81ae3095fa4916, server=3609ad07831c,39733,1734371789085 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-16T17:57:33,160 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39733 {}] ipc.CallRunner(138): callId: 12 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45484 deadline: 1734371913159, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9edd584a765d2a226d81ae3095fa4916, server=3609ad07831c,39733,1734371789085 2024-12-16T17:57:33,162 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39733 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9edd584a765d2a226d81ae3095fa4916, server=3609ad07831c,39733,1734371789085 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-16T17:57:33,162 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39733 {}] ipc.CallRunner(138): callId: 28 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45446 deadline: 1734371913161, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9edd584a765d2a226d81ae3095fa4916, server=3609ad07831c,39733,1734371789085 2024-12-16T17:57:33,168 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39733 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9edd584a765d2a226d81ae3095fa4916, server=3609ad07831c,39733,1734371789085 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-16T17:57:33,168 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39733 {}] ipc.CallRunner(138): callId: 49 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45532 deadline: 1734371913166, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9edd584a765d2a226d81ae3095fa4916, server=3609ad07831c,39733,1734371789085 2024-12-16T17:57:33,170 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39733 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9edd584a765d2a226d81ae3095fa4916, server=3609ad07831c,39733,1734371789085 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-16T17:57:33,170 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39733 {}] ipc.CallRunner(138): callId: 12 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45486 deadline: 1734371913168, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9edd584a765d2a226d81ae3095fa4916, server=3609ad07831c,39733,1734371789085 2024-12-16T17:57:33,323 DEBUG [master/3609ad07831c:0.Chore.1 {}] balancer.RegionLocationFinder(172): Locality for region a4053c31d189c903d02c8274354da0e8 changed from -1.0 to 0.0, refreshing cache 2024-12-16T17:57:33,371 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39733 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9edd584a765d2a226d81ae3095fa4916, server=3609ad07831c,39733,1734371789085 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-16T17:57:33,372 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39733 {}] ipc.CallRunner(138): callId: 51 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45532 deadline: 1734371913371, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9edd584a765d2a226d81ae3095fa4916, server=3609ad07831c,39733,1734371789085 2024-12-16T17:57:33,410 INFO [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-2 {event_type=RS_FLUSH_REGIONS, pid=64}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=15.65 KB at sequenceid=48 (bloomFilter=true), to=hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/9edd584a765d2a226d81ae3095fa4916/.tmp/A/cb3769fb7a554a47ad0a6ee7bb9aa3dd 2024-12-16T17:57:33,429 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-2 {event_type=RS_FLUSH_REGIONS, pid=64}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/9edd584a765d2a226d81ae3095fa4916/.tmp/B/69b84cb11a014304979fbdf85cf3667f is 50, key is test_row_0/B:col10/1734371852335/Put/seqid=0 2024-12-16T17:57:33,449 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41817 is added to blk_1073742085_1261 (size=9657) 2024-12-16T17:57:33,453 INFO [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-2 {event_type=RS_FLUSH_REGIONS, pid=64}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=15.65 KB at sequenceid=48 (bloomFilter=true), to=hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/9edd584a765d2a226d81ae3095fa4916/.tmp/B/69b84cb11a014304979fbdf85cf3667f 2024-12-16T17:57:33,467 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-2 {event_type=RS_FLUSH_REGIONS, pid=64}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/9edd584a765d2a226d81ae3095fa4916/.tmp/C/70e586f8731f4726a8572a43ae6758eb is 50, key is test_row_0/C:col10/1734371852335/Put/seqid=0 2024-12-16T17:57:33,508 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41817 is added to blk_1073742086_1262 (size=9657) 2024-12-16T17:57:33,675 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39733 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9edd584a765d2a226d81ae3095fa4916, server=3609ad07831c,39733,1734371789085 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-16T17:57:33,676 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39733 {}] ipc.CallRunner(138): callId: 53 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45532 deadline: 1734371913675, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9edd584a765d2a226d81ae3095fa4916, server=3609ad07831c,39733,1734371789085 2024-12-16T17:57:33,909 INFO [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-2 {event_type=RS_FLUSH_REGIONS, pid=64}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=15.65 KB at sequenceid=48 (bloomFilter=true), to=hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/9edd584a765d2a226d81ae3095fa4916/.tmp/C/70e586f8731f4726a8572a43ae6758eb 2024-12-16T17:57:33,921 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-2 {event_type=RS_FLUSH_REGIONS, pid=64}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/9edd584a765d2a226d81ae3095fa4916/.tmp/A/cb3769fb7a554a47ad0a6ee7bb9aa3dd as hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/9edd584a765d2a226d81ae3095fa4916/A/cb3769fb7a554a47ad0a6ee7bb9aa3dd 2024-12-16T17:57:33,927 INFO [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-2 {event_type=RS_FLUSH_REGIONS, pid=64}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/9edd584a765d2a226d81ae3095fa4916/A/cb3769fb7a554a47ad0a6ee7bb9aa3dd, entries=100, sequenceid=48, filesize=9.4 K 2024-12-16T17:57:33,930 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-2 {event_type=RS_FLUSH_REGIONS, pid=64}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/9edd584a765d2a226d81ae3095fa4916/.tmp/B/69b84cb11a014304979fbdf85cf3667f as hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/9edd584a765d2a226d81ae3095fa4916/B/69b84cb11a014304979fbdf85cf3667f 2024-12-16T17:57:33,937 INFO [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-2 {event_type=RS_FLUSH_REGIONS, pid=64}] regionserver.HStore$StoreFlusherImpl(1989): Added 
hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/9edd584a765d2a226d81ae3095fa4916/B/69b84cb11a014304979fbdf85cf3667f, entries=100, sequenceid=48, filesize=9.4 K 2024-12-16T17:57:33,940 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-2 {event_type=RS_FLUSH_REGIONS, pid=64}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/9edd584a765d2a226d81ae3095fa4916/.tmp/C/70e586f8731f4726a8572a43ae6758eb as hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/9edd584a765d2a226d81ae3095fa4916/C/70e586f8731f4726a8572a43ae6758eb 2024-12-16T17:57:33,949 INFO [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-2 {event_type=RS_FLUSH_REGIONS, pid=64}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/9edd584a765d2a226d81ae3095fa4916/C/70e586f8731f4726a8572a43ae6758eb, entries=100, sequenceid=48, filesize=9.4 K 2024-12-16T17:57:33,952 INFO [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-2 {event_type=RS_FLUSH_REGIONS, pid=64}] regionserver.HRegion(3040): Finished flush of dataSize ~46.96 KB/48090, heapSize ~123.75 KB/126720, currentSize=154.31 KB/158010 for 9edd584a765d2a226d81ae3095fa4916 in 1003ms, sequenceid=48, compaction requested=true 2024-12-16T17:57:33,952 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-2 {event_type=RS_FLUSH_REGIONS, pid=64}] regionserver.HRegion(2538): Flush status journal for 9edd584a765d2a226d81ae3095fa4916: 2024-12-16T17:57:33,952 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-2 {event_type=RS_FLUSH_REGIONS, pid=64}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1734371848597.9edd584a765d2a226d81ae3095fa4916. 
2024-12-16T17:57:33,952 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-2 {event_type=RS_FLUSH_REGIONS, pid=64}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=64 2024-12-16T17:57:33,953 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38367 {}] master.HMaster(4106): Remote procedure done, pid=64 2024-12-16T17:57:33,957 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=64, resume processing ppid=63 2024-12-16T17:57:33,957 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=64, ppid=63, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 3.0130 sec 2024-12-16T17:57:33,960 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=63, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=63, table=TestAcidGuarantees in 3.0200 sec 2024-12-16T17:57:34,184 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 9edd584a765d2a226d81ae3095fa4916 3/3 column families, dataSize=161.02 KB heapSize=422.63 KB 2024-12-16T17:57:34,184 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 9edd584a765d2a226d81ae3095fa4916, store=A 2024-12-16T17:57:34,184 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-16T17:57:34,184 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 9edd584a765d2a226d81ae3095fa4916, store=B 2024-12-16T17:57:34,184 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-16T17:57:34,184 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 9edd584a765d2a226d81ae3095fa4916, store=C 2024-12-16T17:57:34,184 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-16T17:57:34,185 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39733 {}] regionserver.HRegion(8581): Flush requested on 9edd584a765d2a226d81ae3095fa4916 2024-12-16T17:57:34,198 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/9edd584a765d2a226d81ae3095fa4916/.tmp/A/ededcf6667c544aba64455be628d26c4 is 50, key is test_row_0/A:col10/1734371853053/Put/seqid=0 2024-12-16T17:57:34,210 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39733 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9edd584a765d2a226d81ae3095fa4916, server=3609ad07831c,39733,1734371789085 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-16T17:57:34,211 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39733 {}] ipc.CallRunner(138): callId: 62 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45532 deadline: 1734371914208, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9edd584a765d2a226d81ae3095fa4916, server=3609ad07831c,39733,1734371789085 2024-12-16T17:57:34,240 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41817 is added to blk_1073742087_1263 (size=12001) 2024-12-16T17:57:34,252 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=53.67 KB at sequenceid=75 (bloomFilter=true), to=hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/9edd584a765d2a226d81ae3095fa4916/.tmp/A/ededcf6667c544aba64455be628d26c4 2024-12-16T17:57:34,273 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/9edd584a765d2a226d81ae3095fa4916/.tmp/B/d42e6255ca504492ad3598ef2dc7482e is 50, key is test_row_0/B:col10/1734371853053/Put/seqid=0 2024-12-16T17:57:34,314 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39733 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9edd584a765d2a226d81ae3095fa4916, server=3609ad07831c,39733,1734371789085 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-16T17:57:34,314 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39733 {}] ipc.CallRunner(138): callId: 64 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45532 deadline: 1734371914312, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9edd584a765d2a226d81ae3095fa4916, server=3609ad07831c,39733,1734371789085 2024-12-16T17:57:34,332 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41817 is added to blk_1073742088_1264 (size=12001) 2024-12-16T17:57:34,334 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=53.67 KB at sequenceid=75 (bloomFilter=true), to=hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/9edd584a765d2a226d81ae3095fa4916/.tmp/B/d42e6255ca504492ad3598ef2dc7482e 2024-12-16T17:57:34,354 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/9edd584a765d2a226d81ae3095fa4916/.tmp/C/d695ad09dd7f4b20a8c737285fdf6989 is 50, key is test_row_0/C:col10/1734371853053/Put/seqid=0 2024-12-16T17:57:34,396 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41817 is added to blk_1073742089_1265 (size=12001) 2024-12-16T17:57:34,399 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=53.67 KB at sequenceid=75 (bloomFilter=true), to=hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/9edd584a765d2a226d81ae3095fa4916/.tmp/C/d695ad09dd7f4b20a8c737285fdf6989 2024-12-16T17:57:34,403 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/9edd584a765d2a226d81ae3095fa4916/.tmp/A/ededcf6667c544aba64455be628d26c4 as hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/9edd584a765d2a226d81ae3095fa4916/A/ededcf6667c544aba64455be628d26c4 2024-12-16T17:57:34,408 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/9edd584a765d2a226d81ae3095fa4916/A/ededcf6667c544aba64455be628d26c4, entries=150, sequenceid=75, filesize=11.7 K 2024-12-16T17:57:34,410 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/9edd584a765d2a226d81ae3095fa4916/.tmp/B/d42e6255ca504492ad3598ef2dc7482e as 
hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/9edd584a765d2a226d81ae3095fa4916/B/d42e6255ca504492ad3598ef2dc7482e 2024-12-16T17:57:34,415 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/9edd584a765d2a226d81ae3095fa4916/B/d42e6255ca504492ad3598ef2dc7482e, entries=150, sequenceid=75, filesize=11.7 K 2024-12-16T17:57:34,417 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/9edd584a765d2a226d81ae3095fa4916/.tmp/C/d695ad09dd7f4b20a8c737285fdf6989 as hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/9edd584a765d2a226d81ae3095fa4916/C/d695ad09dd7f4b20a8c737285fdf6989 2024-12-16T17:57:34,423 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/9edd584a765d2a226d81ae3095fa4916/C/d695ad09dd7f4b20a8c737285fdf6989, entries=150, sequenceid=75, filesize=11.7 K 2024-12-16T17:57:34,424 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~161.02 KB/164880, heapSize ~422.58 KB/432720, currentSize=40.25 KB/41220 for 9edd584a765d2a226d81ae3095fa4916 in 240ms, sequenceid=75, compaction requested=true 2024-12-16T17:57:34,425 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 9edd584a765d2a226d81ae3095fa4916: 2024-12-16T17:57:34,425 DEBUG [RS:0;3609ad07831c:39733-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 4 store files, 0 compacting, 4 eligible, 16 blocking 2024-12-16T17:57:34,426 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 9edd584a765d2a226d81ae3095fa4916:A, priority=-2147483648, current under compaction store size is 1 2024-12-16T17:57:34,426 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-16T17:57:34,426 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 9edd584a765d2a226d81ae3095fa4916:B, priority=-2147483648, current under compaction store size is 2 2024-12-16T17:57:34,426 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-16T17:57:34,426 DEBUG [RS:0;3609ad07831c:39733-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 4 store files, 0 compacting, 4 eligible, 16 blocking 2024-12-16T17:57:34,426 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 9edd584a765d2a226d81ae3095fa4916:C, priority=-2147483648, current under compaction store size is 3 2024-12-16T17:57:34,426 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-16T17:57:34,427 DEBUG [RS:0;3609ad07831c:39733-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction 
algorithm has selected 4 files of size 45660 starting at candidate #0 after considering 3 permutations with 3 in ratio 2024-12-16T17:57:34,427 DEBUG [RS:0;3609ad07831c:39733-shortCompactions-0 {}] regionserver.HStore(1540): 9edd584a765d2a226d81ae3095fa4916/A is initiating minor compaction (all files) 2024-12-16T17:57:34,427 INFO [RS:0;3609ad07831c:39733-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 9edd584a765d2a226d81ae3095fa4916/A in TestAcidGuarantees,,1734371848597.9edd584a765d2a226d81ae3095fa4916. 2024-12-16T17:57:34,427 INFO [RS:0;3609ad07831c:39733-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/9edd584a765d2a226d81ae3095fa4916/A/d8c330f2b8b146708621caf329c5f1db, hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/9edd584a765d2a226d81ae3095fa4916/A/aebe984fc5ec4a5fbbd4060edacc4bdc, hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/9edd584a765d2a226d81ae3095fa4916/A/cb3769fb7a554a47ad0a6ee7bb9aa3dd, hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/9edd584a765d2a226d81ae3095fa4916/A/ededcf6667c544aba64455be628d26c4] into tmpdir=hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/9edd584a765d2a226d81ae3095fa4916/.tmp, totalSize=44.6 K 2024-12-16T17:57:34,428 DEBUG [RS:0;3609ad07831c:39733-shortCompactions-0 {}] compactions.Compactor(224): Compacting d8c330f2b8b146708621caf329c5f1db, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=12, earliestPutTs=1734371850974 2024-12-16T17:57:34,428 DEBUG [RS:0;3609ad07831c:39733-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 4 files of size 45660 starting at candidate #0 after considering 3 permutations with 3 in ratio 2024-12-16T17:57:34,428 DEBUG [RS:0;3609ad07831c:39733-shortCompactions-0 {}] compactions.Compactor(224): Compacting aebe984fc5ec4a5fbbd4060edacc4bdc, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=38, earliestPutTs=1734371851013 2024-12-16T17:57:34,428 DEBUG [RS:0;3609ad07831c:39733-longCompactions-0 {}] regionserver.HStore(1540): 9edd584a765d2a226d81ae3095fa4916/B is initiating minor compaction (all files) 2024-12-16T17:57:34,428 INFO [RS:0;3609ad07831c:39733-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 9edd584a765d2a226d81ae3095fa4916/B in TestAcidGuarantees,,1734371848597.9edd584a765d2a226d81ae3095fa4916. 
2024-12-16T17:57:34,428 INFO [RS:0;3609ad07831c:39733-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/9edd584a765d2a226d81ae3095fa4916/B/2e84d862af4642caa0fea02e3ed71b5f, hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/9edd584a765d2a226d81ae3095fa4916/B/e3db51af415c4c82a3b564f36ed6fc52, hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/9edd584a765d2a226d81ae3095fa4916/B/69b84cb11a014304979fbdf85cf3667f, hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/9edd584a765d2a226d81ae3095fa4916/B/d42e6255ca504492ad3598ef2dc7482e] into tmpdir=hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/9edd584a765d2a226d81ae3095fa4916/.tmp, totalSize=44.6 K 2024-12-16T17:57:34,429 DEBUG [RS:0;3609ad07831c:39733-shortCompactions-0 {}] compactions.Compactor(224): Compacting cb3769fb7a554a47ad0a6ee7bb9aa3dd, keycount=100, bloomtype=ROW, size=9.4 K, encoding=NONE, compression=NONE, seqNum=48, earliestPutTs=1734371852322 2024-12-16T17:57:34,429 DEBUG [RS:0;3609ad07831c:39733-shortCompactions-0 {}] compactions.Compactor(224): Compacting ededcf6667c544aba64455be628d26c4, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=75, earliestPutTs=1734371853039 2024-12-16T17:57:34,429 DEBUG [RS:0;3609ad07831c:39733-longCompactions-0 {}] compactions.Compactor(224): Compacting 2e84d862af4642caa0fea02e3ed71b5f, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=12, earliestPutTs=1734371850974 2024-12-16T17:57:34,430 DEBUG [RS:0;3609ad07831c:39733-longCompactions-0 {}] compactions.Compactor(224): Compacting e3db51af415c4c82a3b564f36ed6fc52, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=38, earliestPutTs=1734371851013 2024-12-16T17:57:34,430 DEBUG [RS:0;3609ad07831c:39733-longCompactions-0 {}] compactions.Compactor(224): Compacting 69b84cb11a014304979fbdf85cf3667f, keycount=100, bloomtype=ROW, size=9.4 K, encoding=NONE, compression=NONE, seqNum=48, earliestPutTs=1734371852322 2024-12-16T17:57:34,431 DEBUG [RS:0;3609ad07831c:39733-longCompactions-0 {}] compactions.Compactor(224): Compacting d42e6255ca504492ad3598ef2dc7482e, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=75, earliestPutTs=1734371853039 2024-12-16T17:57:34,449 INFO [RS:0;3609ad07831c:39733-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 9edd584a765d2a226d81ae3095fa4916#A#compaction#219 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-12-16T17:57:34,450 DEBUG [RS:0;3609ad07831c:39733-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/9edd584a765d2a226d81ae3095fa4916/.tmp/A/ff0e0852c9ef43a1b0a86fe9a7998e00 is 50, key is test_row_0/A:col10/1734371853053/Put/seqid=0 2024-12-16T17:57:34,462 INFO [RS:0;3609ad07831c:39733-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 9edd584a765d2a226d81ae3095fa4916#B#compaction#220 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-12-16T17:57:34,463 DEBUG [RS:0;3609ad07831c:39733-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/9edd584a765d2a226d81ae3095fa4916/.tmp/B/390a157c70f143e492441f1ce599e703 is 50, key is test_row_0/B:col10/1734371853053/Put/seqid=0 2024-12-16T17:57:34,502 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41817 is added to blk_1073742090_1266 (size=12139) 2024-12-16T17:57:34,522 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41817 is added to blk_1073742091_1267 (size=12139) 2024-12-16T17:57:34,522 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39733 {}] regionserver.HRegion(8581): Flush requested on 9edd584a765d2a226d81ae3095fa4916 2024-12-16T17:57:34,522 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 9edd584a765d2a226d81ae3095fa4916 3/3 column families, dataSize=53.67 KB heapSize=141.38 KB 2024-12-16T17:57:34,523 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 9edd584a765d2a226d81ae3095fa4916, store=A 2024-12-16T17:57:34,523 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-16T17:57:34,523 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 9edd584a765d2a226d81ae3095fa4916, store=B 2024-12-16T17:57:34,523 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-16T17:57:34,523 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 9edd584a765d2a226d81ae3095fa4916, store=C 2024-12-16T17:57:34,523 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-16T17:57:34,534 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/9edd584a765d2a226d81ae3095fa4916/.tmp/A/090efc5c96494f04a3a3b0c03ada981a is 50, key is test_row_0/A:col10/1734371854203/Put/seqid=0 2024-12-16T17:57:34,582 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41817 is added to blk_1073742092_1268 (size=12001) 2024-12-16T17:57:34,583 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=17.89 KB at sequenceid=86 (bloomFilter=true), 
to=hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/9edd584a765d2a226d81ae3095fa4916/.tmp/A/090efc5c96494f04a3a3b0c03ada981a 2024-12-16T17:57:34,597 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/9edd584a765d2a226d81ae3095fa4916/.tmp/B/29070dccd7c6430a86162bd0c570a598 is 50, key is test_row_0/B:col10/1734371854203/Put/seqid=0 2024-12-16T17:57:34,641 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39733 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9edd584a765d2a226d81ae3095fa4916, server=3609ad07831c,39733,1734371789085 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-16T17:57:34,641 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39733 {}] ipc.CallRunner(138): callId: 90 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45532 deadline: 1734371914638, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9edd584a765d2a226d81ae3095fa4916, server=3609ad07831c,39733,1734371789085 2024-12-16T17:57:34,642 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41817 is added to blk_1073742093_1269 (size=12001) 2024-12-16T17:57:34,747 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39733 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9edd584a765d2a226d81ae3095fa4916, server=3609ad07831c,39733,1734371789085 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-16T17:57:34,747 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39733 {}] ipc.CallRunner(138): callId: 92 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45532 deadline: 1734371914743, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9edd584a765d2a226d81ae3095fa4916, server=3609ad07831c,39733,1734371789085 2024-12-16T17:57:34,927 DEBUG [RS:0;3609ad07831c:39733-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/9edd584a765d2a226d81ae3095fa4916/.tmp/A/ff0e0852c9ef43a1b0a86fe9a7998e00 as hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/9edd584a765d2a226d81ae3095fa4916/A/ff0e0852c9ef43a1b0a86fe9a7998e00 2024-12-16T17:57:34,931 DEBUG [RS:0;3609ad07831c:39733-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/9edd584a765d2a226d81ae3095fa4916/.tmp/B/390a157c70f143e492441f1ce599e703 as hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/9edd584a765d2a226d81ae3095fa4916/B/390a157c70f143e492441f1ce599e703 2024-12-16T17:57:34,939 INFO [RS:0;3609ad07831c:39733-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 4 (all) file(s) in 9edd584a765d2a226d81ae3095fa4916/A of 9edd584a765d2a226d81ae3095fa4916 into ff0e0852c9ef43a1b0a86fe9a7998e00(size=11.9 K), total size for store is 11.9 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-12-16T17:57:34,939 DEBUG [RS:0;3609ad07831c:39733-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 9edd584a765d2a226d81ae3095fa4916: 2024-12-16T17:57:34,939 INFO [RS:0;3609ad07831c:39733-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1734371848597.9edd584a765d2a226d81ae3095fa4916., storeName=9edd584a765d2a226d81ae3095fa4916/A, priority=12, startTime=1734371854425; duration=0sec 2024-12-16T17:57:34,939 DEBUG [RS:0;3609ad07831c:39733-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-16T17:57:34,939 DEBUG [RS:0;3609ad07831c:39733-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 9edd584a765d2a226d81ae3095fa4916:A 2024-12-16T17:57:34,939 DEBUG [RS:0;3609ad07831c:39733-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 4 store files, 0 compacting, 4 eligible, 16 blocking 2024-12-16T17:57:34,943 DEBUG [RS:0;3609ad07831c:39733-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 4 files of size 45660 starting at candidate #0 after considering 3 permutations with 3 in ratio 2024-12-16T17:57:34,943 DEBUG [RS:0;3609ad07831c:39733-shortCompactions-0 {}] regionserver.HStore(1540): 9edd584a765d2a226d81ae3095fa4916/C is initiating minor compaction (all files) 2024-12-16T17:57:34,943 INFO [RS:0;3609ad07831c:39733-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 9edd584a765d2a226d81ae3095fa4916/C in TestAcidGuarantees,,1734371848597.9edd584a765d2a226d81ae3095fa4916. 2024-12-16T17:57:34,943 INFO [RS:0;3609ad07831c:39733-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/9edd584a765d2a226d81ae3095fa4916/C/b22197d3e7aa43c787bfe959c47580bf, hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/9edd584a765d2a226d81ae3095fa4916/C/ee7ec8f49a99418e9772925db6304cc2, hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/9edd584a765d2a226d81ae3095fa4916/C/70e586f8731f4726a8572a43ae6758eb, hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/9edd584a765d2a226d81ae3095fa4916/C/d695ad09dd7f4b20a8c737285fdf6989] into tmpdir=hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/9edd584a765d2a226d81ae3095fa4916/.tmp, totalSize=44.6 K 2024-12-16T17:57:34,945 DEBUG [RS:0;3609ad07831c:39733-shortCompactions-0 {}] compactions.Compactor(224): Compacting b22197d3e7aa43c787bfe959c47580bf, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=12, earliestPutTs=1734371850974 2024-12-16T17:57:34,945 DEBUG [RS:0;3609ad07831c:39733-shortCompactions-0 {}] compactions.Compactor(224): Compacting ee7ec8f49a99418e9772925db6304cc2, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=38, earliestPutTs=1734371851013 2024-12-16T17:57:34,947 DEBUG [RS:0;3609ad07831c:39733-shortCompactions-0 {}] compactions.Compactor(224): Compacting 70e586f8731f4726a8572a43ae6758eb, keycount=100, bloomtype=ROW, size=9.4 K, 
encoding=NONE, compression=NONE, seqNum=48, earliestPutTs=1734371852322 2024-12-16T17:57:34,947 DEBUG [RS:0;3609ad07831c:39733-shortCompactions-0 {}] compactions.Compactor(224): Compacting d695ad09dd7f4b20a8c737285fdf6989, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=75, earliestPutTs=1734371853039 2024-12-16T17:57:34,949 INFO [RS:0;3609ad07831c:39733-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 4 (all) file(s) in 9edd584a765d2a226d81ae3095fa4916/B of 9edd584a765d2a226d81ae3095fa4916 into 390a157c70f143e492441f1ce599e703(size=11.9 K), total size for store is 11.9 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-12-16T17:57:34,949 DEBUG [RS:0;3609ad07831c:39733-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 9edd584a765d2a226d81ae3095fa4916: 2024-12-16T17:57:34,949 INFO [RS:0;3609ad07831c:39733-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1734371848597.9edd584a765d2a226d81ae3095fa4916., storeName=9edd584a765d2a226d81ae3095fa4916/B, priority=12, startTime=1734371854426; duration=0sec 2024-12-16T17:57:34,949 DEBUG [RS:0;3609ad07831c:39733-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-16T17:57:34,949 DEBUG [RS:0;3609ad07831c:39733-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 9edd584a765d2a226d81ae3095fa4916:B 2024-12-16T17:57:34,953 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39733 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9edd584a765d2a226d81ae3095fa4916, server=3609ad07831c,39733,1734371789085 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-16T17:57:34,953 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39733 {}] ipc.CallRunner(138): callId: 94 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45532 deadline: 1734371914949, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9edd584a765d2a226d81ae3095fa4916, server=3609ad07831c,39733,1734371789085 2024-12-16T17:57:34,963 INFO [RS:0;3609ad07831c:39733-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 9edd584a765d2a226d81ae3095fa4916#C#compaction#223 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-12-16T17:57:34,964 DEBUG [RS:0;3609ad07831c:39733-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/9edd584a765d2a226d81ae3095fa4916/.tmp/C/70d106120a764da5a6f4746be14078fb is 50, key is test_row_0/C:col10/1734371853053/Put/seqid=0 2024-12-16T17:57:35,026 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41817 is added to blk_1073742094_1270 (size=12139) 2024-12-16T17:57:35,043 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=17.89 KB at sequenceid=86 (bloomFilter=true), to=hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/9edd584a765d2a226d81ae3095fa4916/.tmp/B/29070dccd7c6430a86162bd0c570a598 2024-12-16T17:57:35,047 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38367 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=63 2024-12-16T17:57:35,047 INFO [Thread-1171 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 63 completed 2024-12-16T17:57:35,049 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38367 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-12-16T17:57:35,050 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38367 {}] procedure2.ProcedureExecutor(1098): Stored pid=65, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=65, table=TestAcidGuarantees 2024-12-16T17:57:35,051 INFO [PEWorker-1 {}] procedure.FlushTableProcedure(91): pid=65, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=65, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-12-16T17:57:35,052 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38367 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=65 2024-12-16T17:57:35,052 INFO [PEWorker-1 {}] procedure.FlushTableProcedure(91): pid=65, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; 
org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=65, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-12-16T17:57:35,052 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=66, ppid=65, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-12-16T17:57:35,062 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/9edd584a765d2a226d81ae3095fa4916/.tmp/C/0b729069623444e4a44757eb00b79c68 is 50, key is test_row_0/C:col10/1734371854203/Put/seqid=0 2024-12-16T17:57:35,093 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41817 is added to blk_1073742095_1271 (size=12001) 2024-12-16T17:57:35,094 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=17.89 KB at sequenceid=86 (bloomFilter=true), to=hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/9edd584a765d2a226d81ae3095fa4916/.tmp/C/0b729069623444e4a44757eb00b79c68 2024-12-16T17:57:35,100 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/9edd584a765d2a226d81ae3095fa4916/.tmp/A/090efc5c96494f04a3a3b0c03ada981a as hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/9edd584a765d2a226d81ae3095fa4916/A/090efc5c96494f04a3a3b0c03ada981a 2024-12-16T17:57:35,108 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/9edd584a765d2a226d81ae3095fa4916/A/090efc5c96494f04a3a3b0c03ada981a, entries=150, sequenceid=86, filesize=11.7 K 2024-12-16T17:57:35,109 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/9edd584a765d2a226d81ae3095fa4916/.tmp/B/29070dccd7c6430a86162bd0c570a598 as hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/9edd584a765d2a226d81ae3095fa4916/B/29070dccd7c6430a86162bd0c570a598 2024-12-16T17:57:35,117 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/9edd584a765d2a226d81ae3095fa4916/B/29070dccd7c6430a86162bd0c570a598, entries=150, sequenceid=86, filesize=11.7 K 2024-12-16T17:57:35,118 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/9edd584a765d2a226d81ae3095fa4916/.tmp/C/0b729069623444e4a44757eb00b79c68 as hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/9edd584a765d2a226d81ae3095fa4916/C/0b729069623444e4a44757eb00b79c68 2024-12-16T17:57:35,124 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added 
hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/9edd584a765d2a226d81ae3095fa4916/C/0b729069623444e4a44757eb00b79c68, entries=150, sequenceid=86, filesize=11.7 K 2024-12-16T17:57:35,125 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~53.67 KB/54960, heapSize ~141.33 KB/144720, currentSize=147.60 KB/151140 for 9edd584a765d2a226d81ae3095fa4916 in 602ms, sequenceid=86, compaction requested=false 2024-12-16T17:57:35,125 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 9edd584a765d2a226d81ae3095fa4916: 2024-12-16T17:57:35,152 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38367 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=65 2024-12-16T17:57:35,163 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 9edd584a765d2a226d81ae3095fa4916 3/3 column families, dataSize=154.31 KB heapSize=405.05 KB 2024-12-16T17:57:35,164 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 9edd584a765d2a226d81ae3095fa4916, store=A 2024-12-16T17:57:35,164 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-16T17:57:35,164 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 9edd584a765d2a226d81ae3095fa4916, store=B 2024-12-16T17:57:35,164 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-16T17:57:35,164 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 9edd584a765d2a226d81ae3095fa4916, store=C 2024-12-16T17:57:35,164 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-16T17:57:35,165 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39733 {}] regionserver.HRegion(8581): Flush requested on 9edd584a765d2a226d81ae3095fa4916 2024-12-16T17:57:35,179 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/9edd584a765d2a226d81ae3095fa4916/.tmp/A/86cdfcd5b50544368b721664d3695a24 is 50, key is test_row_0/A:col10/1734371854636/Put/seqid=0 2024-12-16T17:57:35,198 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39733 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9edd584a765d2a226d81ae3095fa4916, server=3609ad07831c,39733,1734371789085 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-16T17:57:35,198 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39733 {}] ipc.CallRunner(138): callId: 15 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45486 deadline: 1734371915193, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9edd584a765d2a226d81ae3095fa4916, server=3609ad07831c,39733,1734371789085 2024-12-16T17:57:35,198 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39733 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9edd584a765d2a226d81ae3095fa4916, server=3609ad07831c,39733,1734371789085 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-16T17:57:35,198 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39733 {}] ipc.CallRunner(138): callId: 31 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45446 deadline: 1734371915197, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9edd584a765d2a226d81ae3095fa4916, server=3609ad07831c,39733,1734371789085 2024-12-16T17:57:35,202 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39733 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9edd584a765d2a226d81ae3095fa4916, server=3609ad07831c,39733,1734371789085 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-16T17:57:35,202 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39733 {}] ipc.CallRunner(138): callId: 16 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45484 deadline: 1734371915198, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9edd584a765d2a226d81ae3095fa4916, server=3609ad07831c,39733,1734371789085 2024-12-16T17:57:35,203 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39733 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9edd584a765d2a226d81ae3095fa4916, server=3609ad07831c,39733,1734371789085 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-16T17:57:35,203 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39733 {}] ipc.CallRunner(138): callId: 33 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45454 deadline: 1734371915198, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9edd584a765d2a226d81ae3095fa4916, server=3609ad07831c,39733,1734371789085 2024-12-16T17:57:35,203 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 3609ad07831c,39733,1734371789085 2024-12-16T17:57:35,203 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=39733 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=66 2024-12-16T17:57:35,204 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-0 {event_type=RS_FLUSH_REGIONS, pid=66}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1734371848597.9edd584a765d2a226d81ae3095fa4916. 2024-12-16T17:57:35,204 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-0 {event_type=RS_FLUSH_REGIONS, pid=66}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1734371848597.9edd584a765d2a226d81ae3095fa4916. as already flushing 2024-12-16T17:57:35,204 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-0 {event_type=RS_FLUSH_REGIONS, pid=66}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1734371848597.9edd584a765d2a226d81ae3095fa4916. 2024-12-16T17:57:35,204 ERROR [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-0 {event_type=RS_FLUSH_REGIONS, pid=66}] handler.RSProcedureHandler(58): pid=66 java.io.IOException: Unable to complete flush {ENCODED => 9edd584a765d2a226d81ae3095fa4916, NAME => 'TestAcidGuarantees,,1734371848597.9edd584a765d2a226d81ae3095fa4916.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-16T17:57:35,204 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-0 {event_type=RS_FLUSH_REGIONS, pid=66}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=66 java.io.IOException: Unable to complete flush {ENCODED => 9edd584a765d2a226d81ae3095fa4916, NAME => 'TestAcidGuarantees,,1734371848597.9edd584a765d2a226d81ae3095fa4916.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-16T17:57:35,205 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38367 {}] master.HMaster(4114): Remote procedure failed, pid=66 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 9edd584a765d2a226d81ae3095fa4916, NAME => 'TestAcidGuarantees,,1734371848597.9edd584a765d2a226d81ae3095fa4916.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 9edd584a765d2a226d81ae3095fa4916, NAME => 'TestAcidGuarantees,,1734371848597.9edd584a765d2a226d81ae3095fa4916.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-16T17:57:35,220 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41817 is added to blk_1073742096_1272 (size=14341) 2024-12-16T17:57:35,221 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=51.44 KB at sequenceid=114 (bloomFilter=true), to=hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/9edd584a765d2a226d81ae3095fa4916/.tmp/A/86cdfcd5b50544368b721664d3695a24 2024-12-16T17:57:35,230 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/9edd584a765d2a226d81ae3095fa4916/.tmp/B/7d58f837bb99457a8828c40f6abcb04d is 50, key is test_row_0/B:col10/1734371854636/Put/seqid=0 2024-12-16T17:57:35,258 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39733 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9edd584a765d2a226d81ae3095fa4916, server=3609ad07831c,39733,1734371789085 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-16T17:57:35,258 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39733 {}] ipc.CallRunner(138): callId: 96 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45532 deadline: 1734371915257, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9edd584a765d2a226d81ae3095fa4916, server=3609ad07831c,39733,1734371789085 2024-12-16T17:57:35,269 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41817 is added to blk_1073742097_1273 (size=12001) 2024-12-16T17:57:35,271 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=51.44 KB at sequenceid=114 (bloomFilter=true), to=hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/9edd584a765d2a226d81ae3095fa4916/.tmp/B/7d58f837bb99457a8828c40f6abcb04d 2024-12-16T17:57:35,286 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/9edd584a765d2a226d81ae3095fa4916/.tmp/C/5788755821084a2091b31a6b1564fe58 is 50, key is test_row_0/C:col10/1734371854636/Put/seqid=0 2024-12-16T17:57:35,294 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41817 is added to blk_1073742098_1274 (size=12001) 2024-12-16T17:57:35,300 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39733 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9edd584a765d2a226d81ae3095fa4916, server=3609ad07831c,39733,1734371789085 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-16T17:57:35,300 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39733 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9edd584a765d2a226d81ae3095fa4916, server=3609ad07831c,39733,1734371789085 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-16T17:57:35,300 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39733 {}] ipc.CallRunner(138): callId: 17 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45486 deadline: 1734371915299, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9edd584a765d2a226d81ae3095fa4916, server=3609ad07831c,39733,1734371789085 2024-12-16T17:57:35,300 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39733 {}] ipc.CallRunner(138): callId: 33 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45446 deadline: 1734371915299, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9edd584a765d2a226d81ae3095fa4916, server=3609ad07831c,39733,1734371789085 2024-12-16T17:57:35,304 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39733 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9edd584a765d2a226d81ae3095fa4916, server=3609ad07831c,39733,1734371789085 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-16T17:57:35,304 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39733 {}] ipc.CallRunner(138): callId: 18 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45484 deadline: 1734371915303, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9edd584a765d2a226d81ae3095fa4916, server=3609ad07831c,39733,1734371789085 2024-12-16T17:57:35,305 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39733 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9edd584a765d2a226d81ae3095fa4916, server=3609ad07831c,39733,1734371789085 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-16T17:57:35,305 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39733 {}] ipc.CallRunner(138): callId: 35 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45454 deadline: 1734371915304, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9edd584a765d2a226d81ae3095fa4916, server=3609ad07831c,39733,1734371789085 2024-12-16T17:57:35,353 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38367 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=65 2024-12-16T17:57:35,356 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 3609ad07831c,39733,1734371789085 2024-12-16T17:57:35,356 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=39733 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=66 2024-12-16T17:57:35,357 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-1 {event_type=RS_FLUSH_REGIONS, pid=66}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1734371848597.9edd584a765d2a226d81ae3095fa4916. 2024-12-16T17:57:35,357 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-1 {event_type=RS_FLUSH_REGIONS, pid=66}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1734371848597.9edd584a765d2a226d81ae3095fa4916. as already flushing 2024-12-16T17:57:35,357 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-1 {event_type=RS_FLUSH_REGIONS, pid=66}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1734371848597.9edd584a765d2a226d81ae3095fa4916. 2024-12-16T17:57:35,357 ERROR [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-1 {event_type=RS_FLUSH_REGIONS, pid=66}] handler.RSProcedureHandler(58): pid=66 java.io.IOException: Unable to complete flush {ENCODED => 9edd584a765d2a226d81ae3095fa4916, NAME => 'TestAcidGuarantees,,1734371848597.9edd584a765d2a226d81ae3095fa4916.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-16T17:57:35,357 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-1 {event_type=RS_FLUSH_REGIONS, pid=66}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=66 java.io.IOException: Unable to complete flush {ENCODED => 9edd584a765d2a226d81ae3095fa4916, NAME => 'TestAcidGuarantees,,1734371848597.9edd584a765d2a226d81ae3095fa4916.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-16T17:57:35,357 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38367 {}] master.HMaster(4114): Remote procedure failed, pid=66 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 9edd584a765d2a226d81ae3095fa4916, NAME => 'TestAcidGuarantees,,1734371848597.9edd584a765d2a226d81ae3095fa4916.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 9edd584a765d2a226d81ae3095fa4916, NAME => 'TestAcidGuarantees,,1734371848597.9edd584a765d2a226d81ae3095fa4916.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-16T17:57:35,437 DEBUG [RS:0;3609ad07831c:39733-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/9edd584a765d2a226d81ae3095fa4916/.tmp/C/70d106120a764da5a6f4746be14078fb as hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/9edd584a765d2a226d81ae3095fa4916/C/70d106120a764da5a6f4746be14078fb 2024-12-16T17:57:35,443 INFO [RS:0;3609ad07831c:39733-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 4 (all) file(s) in 9edd584a765d2a226d81ae3095fa4916/C of 9edd584a765d2a226d81ae3095fa4916 into 70d106120a764da5a6f4746be14078fb(size=11.9 K), total size for store is 23.6 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-12-16T17:57:35,443 DEBUG [RS:0;3609ad07831c:39733-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 9edd584a765d2a226d81ae3095fa4916: 2024-12-16T17:57:35,443 INFO [RS:0;3609ad07831c:39733-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1734371848597.9edd584a765d2a226d81ae3095fa4916., storeName=9edd584a765d2a226d81ae3095fa4916/C, priority=12, startTime=1734371854426; duration=0sec 2024-12-16T17:57:35,443 DEBUG [RS:0;3609ad07831c:39733-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-16T17:57:35,443 DEBUG [RS:0;3609ad07831c:39733-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 9edd584a765d2a226d81ae3095fa4916:C 2024-12-16T17:57:35,502 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39733 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9edd584a765d2a226d81ae3095fa4916, server=3609ad07831c,39733,1734371789085 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-16T17:57:35,503 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39733 {}] ipc.CallRunner(138): callId: 19 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45486 deadline: 1734371915501, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9edd584a765d2a226d81ae3095fa4916, server=3609ad07831c,39733,1734371789085 2024-12-16T17:57:35,503 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39733 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9edd584a765d2a226d81ae3095fa4916, server=3609ad07831c,39733,1734371789085 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-16T17:57:35,503 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39733 {}] ipc.CallRunner(138): callId: 35 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45446 deadline: 1734371915501, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9edd584a765d2a226d81ae3095fa4916, server=3609ad07831c,39733,1734371789085 2024-12-16T17:57:35,508 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39733 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9edd584a765d2a226d81ae3095fa4916, server=3609ad07831c,39733,1734371789085 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-16T17:57:35,508 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39733 {}] ipc.CallRunner(138): callId: 20 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45484 deadline: 1734371915507, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9edd584a765d2a226d81ae3095fa4916, server=3609ad07831c,39733,1734371789085 2024-12-16T17:57:35,508 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39733 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9edd584a765d2a226d81ae3095fa4916, server=3609ad07831c,39733,1734371789085 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-16T17:57:35,508 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39733 {}] ipc.CallRunner(138): callId: 37 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45454 deadline: 1734371915507, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9edd584a765d2a226d81ae3095fa4916, server=3609ad07831c,39733,1734371789085 2024-12-16T17:57:35,509 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 3609ad07831c,39733,1734371789085 2024-12-16T17:57:35,509 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=39733 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=66 2024-12-16T17:57:35,509 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-2 {event_type=RS_FLUSH_REGIONS, pid=66}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1734371848597.9edd584a765d2a226d81ae3095fa4916. 2024-12-16T17:57:35,509 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-2 {event_type=RS_FLUSH_REGIONS, pid=66}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1734371848597.9edd584a765d2a226d81ae3095fa4916. as already flushing 2024-12-16T17:57:35,510 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-2 {event_type=RS_FLUSH_REGIONS, pid=66}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1734371848597.9edd584a765d2a226d81ae3095fa4916. 2024-12-16T17:57:35,510 ERROR [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-2 {event_type=RS_FLUSH_REGIONS, pid=66}] handler.RSProcedureHandler(58): pid=66 java.io.IOException: Unable to complete flush {ENCODED => 9edd584a765d2a226d81ae3095fa4916, NAME => 'TestAcidGuarantees,,1734371848597.9edd584a765d2a226d81ae3095fa4916.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-16T17:57:35,510 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-2 {event_type=RS_FLUSH_REGIONS, pid=66}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=66 java.io.IOException: Unable to complete flush {ENCODED => 9edd584a765d2a226d81ae3095fa4916, NAME => 'TestAcidGuarantees,,1734371848597.9edd584a765d2a226d81ae3095fa4916.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-16T17:57:35,510 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38367 {}] master.HMaster(4114): Remote procedure failed, pid=66 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 9edd584a765d2a226d81ae3095fa4916, NAME => 'TestAcidGuarantees,,1734371848597.9edd584a765d2a226d81ae3095fa4916.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 9edd584a765d2a226d81ae3095fa4916, NAME => 'TestAcidGuarantees,,1734371848597.9edd584a765d2a226d81ae3095fa4916.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-16T17:57:35,654 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38367 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=65 2024-12-16T17:57:35,661 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 3609ad07831c,39733,1734371789085 2024-12-16T17:57:35,662 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=39733 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=66 2024-12-16T17:57:35,662 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-0 {event_type=RS_FLUSH_REGIONS, pid=66}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1734371848597.9edd584a765d2a226d81ae3095fa4916. 2024-12-16T17:57:35,662 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-0 {event_type=RS_FLUSH_REGIONS, pid=66}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1734371848597.9edd584a765d2a226d81ae3095fa4916. as already flushing 2024-12-16T17:57:35,662 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-0 {event_type=RS_FLUSH_REGIONS, pid=66}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1734371848597.9edd584a765d2a226d81ae3095fa4916. 2024-12-16T17:57:35,662 ERROR [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-0 {event_type=RS_FLUSH_REGIONS, pid=66}] handler.RSProcedureHandler(58): pid=66 java.io.IOException: Unable to complete flush {ENCODED => 9edd584a765d2a226d81ae3095fa4916, NAME => 'TestAcidGuarantees,,1734371848597.9edd584a765d2a226d81ae3095fa4916.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-16T17:57:35,662 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-0 {event_type=RS_FLUSH_REGIONS, pid=66}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=66 java.io.IOException: Unable to complete flush {ENCODED => 9edd584a765d2a226d81ae3095fa4916, NAME => 'TestAcidGuarantees,,1734371848597.9edd584a765d2a226d81ae3095fa4916.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-16T17:57:35,663 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38367 {}] master.HMaster(4114): Remote procedure failed, pid=66 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 9edd584a765d2a226d81ae3095fa4916, NAME => 'TestAcidGuarantees,,1734371848597.9edd584a765d2a226d81ae3095fa4916.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 9edd584a765d2a226d81ae3095fa4916, NAME => 'TestAcidGuarantees,,1734371848597.9edd584a765d2a226d81ae3095fa4916.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-16T17:57:35,695 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=51.44 KB at sequenceid=114 (bloomFilter=true), to=hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/9edd584a765d2a226d81ae3095fa4916/.tmp/C/5788755821084a2091b31a6b1564fe58 2024-12-16T17:57:35,699 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/9edd584a765d2a226d81ae3095fa4916/.tmp/A/86cdfcd5b50544368b721664d3695a24 as hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/9edd584a765d2a226d81ae3095fa4916/A/86cdfcd5b50544368b721664d3695a24 2024-12-16T17:57:35,703 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/9edd584a765d2a226d81ae3095fa4916/A/86cdfcd5b50544368b721664d3695a24, entries=200, sequenceid=114, filesize=14.0 K 2024-12-16T17:57:35,704 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/9edd584a765d2a226d81ae3095fa4916/.tmp/B/7d58f837bb99457a8828c40f6abcb04d as hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/9edd584a765d2a226d81ae3095fa4916/B/7d58f837bb99457a8828c40f6abcb04d 2024-12-16T17:57:35,708 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/9edd584a765d2a226d81ae3095fa4916/B/7d58f837bb99457a8828c40f6abcb04d, entries=150, sequenceid=114, filesize=11.7 K 2024-12-16T17:57:35,709 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/9edd584a765d2a226d81ae3095fa4916/.tmp/C/5788755821084a2091b31a6b1564fe58 as hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/9edd584a765d2a226d81ae3095fa4916/C/5788755821084a2091b31a6b1564fe58 2024-12-16T17:57:35,713 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/9edd584a765d2a226d81ae3095fa4916/C/5788755821084a2091b31a6b1564fe58, entries=150, sequenceid=114, filesize=11.7 K 2024-12-16T17:57:35,714 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~154.31 KB/158010, heapSize ~405 KB/414720, currentSize=53.67 KB/54960 for 9edd584a765d2a226d81ae3095fa4916 in 550ms, sequenceid=114, compaction requested=true 2024-12-16T17:57:35,714 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 9edd584a765d2a226d81ae3095fa4916: 2024-12-16T17:57:35,714 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 9edd584a765d2a226d81ae3095fa4916:A, priority=-2147483648, current under compaction store size is 1 2024-12-16T17:57:35,714 DEBUG [RS:0;3609ad07831c:39733-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 
compacting, 3 eligible, 16 blocking 2024-12-16T17:57:35,714 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-16T17:57:35,714 DEBUG [RS:0;3609ad07831c:39733-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-16T17:57:35,714 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 9edd584a765d2a226d81ae3095fa4916:B, priority=-2147483648, current under compaction store size is 2 2024-12-16T17:57:35,714 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-16T17:57:35,714 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 9edd584a765d2a226d81ae3095fa4916:C, priority=-2147483648, current under compaction store size is 3 2024-12-16T17:57:35,714 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-16T17:57:35,715 DEBUG [RS:0;3609ad07831c:39733-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 38481 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-16T17:57:35,715 DEBUG [RS:0;3609ad07831c:39733-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 36141 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-16T17:57:35,715 DEBUG [RS:0;3609ad07831c:39733-shortCompactions-0 {}] regionserver.HStore(1540): 9edd584a765d2a226d81ae3095fa4916/A is initiating minor compaction (all files) 2024-12-16T17:57:35,715 DEBUG [RS:0;3609ad07831c:39733-longCompactions-0 {}] regionserver.HStore(1540): 9edd584a765d2a226d81ae3095fa4916/B is initiating minor compaction (all files) 2024-12-16T17:57:35,715 INFO [RS:0;3609ad07831c:39733-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 9edd584a765d2a226d81ae3095fa4916/A in TestAcidGuarantees,,1734371848597.9edd584a765d2a226d81ae3095fa4916. 2024-12-16T17:57:35,715 INFO [RS:0;3609ad07831c:39733-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 9edd584a765d2a226d81ae3095fa4916/B in TestAcidGuarantees,,1734371848597.9edd584a765d2a226d81ae3095fa4916. 
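[Annotation] The ExploringCompactionPolicy lines above report that all 3 eligible store files were selected after considering 1 permutation "with 1 in ratio". As a rough, standalone sketch of the ratio test that ratio-based/exploring selection applies (each file in a candidate set must be no larger than the sum of the other files times a configurable ratio, assumed here to be the usual default of 1.2, with plain long sizes instead of real store-file objects):

```java
import java.util.List;

/** Hypothetical, simplified model of the ratio check used by exploring-style
 *  compaction selection; not the actual HBase ExploringCompactionPolicy. */
public class RatioCheckSketch {

    /**
     * A candidate set passes when each file is no larger than
     * ratio * (sum of the sizes of the other files in the set).
     */
    static boolean withinRatio(List<Long> fileSizes, double ratio) {
        long total = fileSizes.stream().mapToLong(Long::longValue).sum();
        for (long size : fileSizes) {
            if (size > (total - size) * ratio) {
                return false;
            }
        }
        return true;
    }

    public static void main(String[] args) {
        // Sizes roughly matching the three A-family files in the log (bytes).
        List<Long> candidate = List.of(12_200L, 12_000L, 14_300L);
        System.out.println(withinRatio(candidate, 1.2));  // true: all three files qualify
    }
}
```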
2024-12-16T17:57:35,715 INFO [RS:0;3609ad07831c:39733-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/9edd584a765d2a226d81ae3095fa4916/A/ff0e0852c9ef43a1b0a86fe9a7998e00, hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/9edd584a765d2a226d81ae3095fa4916/A/090efc5c96494f04a3a3b0c03ada981a, hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/9edd584a765d2a226d81ae3095fa4916/A/86cdfcd5b50544368b721664d3695a24] into tmpdir=hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/9edd584a765d2a226d81ae3095fa4916/.tmp, totalSize=37.6 K 2024-12-16T17:57:35,715 INFO [RS:0;3609ad07831c:39733-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/9edd584a765d2a226d81ae3095fa4916/B/390a157c70f143e492441f1ce599e703, hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/9edd584a765d2a226d81ae3095fa4916/B/29070dccd7c6430a86162bd0c570a598, hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/9edd584a765d2a226d81ae3095fa4916/B/7d58f837bb99457a8828c40f6abcb04d] into tmpdir=hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/9edd584a765d2a226d81ae3095fa4916/.tmp, totalSize=35.3 K 2024-12-16T17:57:35,715 DEBUG [RS:0;3609ad07831c:39733-shortCompactions-0 {}] compactions.Compactor(224): Compacting ff0e0852c9ef43a1b0a86fe9a7998e00, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=75, earliestPutTs=1734371853039 2024-12-16T17:57:35,715 DEBUG [RS:0;3609ad07831c:39733-longCompactions-0 {}] compactions.Compactor(224): Compacting 390a157c70f143e492441f1ce599e703, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=75, earliestPutTs=1734371853039 2024-12-16T17:57:35,716 DEBUG [RS:0;3609ad07831c:39733-shortCompactions-0 {}] compactions.Compactor(224): Compacting 090efc5c96494f04a3a3b0c03ada981a, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=86, earliestPutTs=1734371854195 2024-12-16T17:57:35,716 DEBUG [RS:0;3609ad07831c:39733-longCompactions-0 {}] compactions.Compactor(224): Compacting 29070dccd7c6430a86162bd0c570a598, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=86, earliestPutTs=1734371854195 2024-12-16T17:57:35,716 DEBUG [RS:0;3609ad07831c:39733-shortCompactions-0 {}] compactions.Compactor(224): Compacting 86cdfcd5b50544368b721664d3695a24, keycount=200, bloomtype=ROW, size=14.0 K, encoding=NONE, compression=NONE, seqNum=114, earliestPutTs=1734371854589 2024-12-16T17:57:35,716 DEBUG [RS:0;3609ad07831c:39733-longCompactions-0 {}] compactions.Compactor(224): Compacting 7d58f837bb99457a8828c40f6abcb04d, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=114, earliestPutTs=1734371854632 2024-12-16T17:57:35,722 INFO [RS:0;3609ad07831c:39733-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 9edd584a765d2a226d81ae3095fa4916#A#compaction#228 average throughput is 6.55 MB/second, slept 0 time(s) and total 
slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-12-16T17:57:35,723 DEBUG [RS:0;3609ad07831c:39733-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/9edd584a765d2a226d81ae3095fa4916/.tmp/A/0b4ad3bd36414bdf9dea359392699961 is 50, key is test_row_0/A:col10/1734371854636/Put/seqid=0 2024-12-16T17:57:35,726 INFO [RS:0;3609ad07831c:39733-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 9edd584a765d2a226d81ae3095fa4916#B#compaction#229 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-12-16T17:57:35,726 DEBUG [RS:0;3609ad07831c:39733-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/9edd584a765d2a226d81ae3095fa4916/.tmp/B/7a1d7cb97b6f410ba0602eb41f8ba647 is 50, key is test_row_0/B:col10/1734371854636/Put/seqid=0 2024-12-16T17:57:35,730 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41817 is added to blk_1073742100_1276 (size=12241) 2024-12-16T17:57:35,734 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41817 is added to blk_1073742099_1275 (size=12241) 2024-12-16T17:57:35,738 DEBUG [RS:0;3609ad07831c:39733-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/9edd584a765d2a226d81ae3095fa4916/.tmp/A/0b4ad3bd36414bdf9dea359392699961 as hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/9edd584a765d2a226d81ae3095fa4916/A/0b4ad3bd36414bdf9dea359392699961 2024-12-16T17:57:35,742 INFO [RS:0;3609ad07831c:39733-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 9edd584a765d2a226d81ae3095fa4916/A of 9edd584a765d2a226d81ae3095fa4916 into 0b4ad3bd36414bdf9dea359392699961(size=12.0 K), total size for store is 12.0 K. This selection was in queue for 0sec, and took 0sec to execute. 
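[Annotation] The PressureAwareThroughputController entries above compare the compaction's average throughput (6.55 MB/s) against a 50.00 MB/s limit and report how long the writer slept to stay under it (0 ms here, since the observed rate is far below the cap). A minimal, generic sketch of that style of rate limiting (not the real HBase controller): track bytes written and sleep just long enough that the observed rate stays at or below the limit.

```java
/** Hypothetical sketch of pressure-aware write throttling: after each chunk,
 *  sleep just long enough to keep the observed rate under a bytes/sec limit. */
public class ThroughputThrottleSketch {

    private final double limitBytesPerSec;
    private final long startNanos = System.nanoTime();
    private long bytesWritten = 0;
    private long totalSleptMs = 0;

    public ThroughputThrottleSketch(double limitBytesPerSec) {
        this.limitBytesPerSec = limitBytesPerSec;
    }

    /** Call after writing a chunk; sleeps if we are ahead of the allowed rate. */
    public void control(long chunkBytes) throws InterruptedException {
        bytesWritten += chunkBytes;
        double elapsedSec = (System.nanoTime() - startNanos) / 1e9;
        // Minimum wall-clock time the bytes written so far are allowed to take.
        double minSec = bytesWritten / limitBytesPerSec;
        long sleepMs = (long) ((minSec - elapsedSec) * 1000);
        if (sleepMs > 0) {
            totalSleptMs += sleepMs;
            Thread.sleep(sleepMs);
        }
    }

    public static void main(String[] args) throws InterruptedException {
        // 50 MB/s limit, as in the log line above.
        ThroughputThrottleSketch throttle = new ThroughputThrottleSketch(50 * 1024 * 1024);
        for (int i = 0; i < 10; i++) {
            throttle.control(1024 * 1024);   // pretend we wrote 1 MB per iteration
        }
        System.out.println("slept " + throttle.totalSleptMs + " ms in total");
    }
}
```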
2024-12-16T17:57:35,742 DEBUG [RS:0;3609ad07831c:39733-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 9edd584a765d2a226d81ae3095fa4916: 2024-12-16T17:57:35,742 INFO [RS:0;3609ad07831c:39733-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1734371848597.9edd584a765d2a226d81ae3095fa4916., storeName=9edd584a765d2a226d81ae3095fa4916/A, priority=13, startTime=1734371855714; duration=0sec 2024-12-16T17:57:35,742 DEBUG [RS:0;3609ad07831c:39733-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-16T17:57:35,742 DEBUG [RS:0;3609ad07831c:39733-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 9edd584a765d2a226d81ae3095fa4916:A 2024-12-16T17:57:35,743 DEBUG [RS:0;3609ad07831c:39733-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-16T17:57:35,744 DEBUG [RS:0;3609ad07831c:39733-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 36141 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-16T17:57:35,744 DEBUG [RS:0;3609ad07831c:39733-shortCompactions-0 {}] regionserver.HStore(1540): 9edd584a765d2a226d81ae3095fa4916/C is initiating minor compaction (all files) 2024-12-16T17:57:35,744 INFO [RS:0;3609ad07831c:39733-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 9edd584a765d2a226d81ae3095fa4916/C in TestAcidGuarantees,,1734371848597.9edd584a765d2a226d81ae3095fa4916. 2024-12-16T17:57:35,744 INFO [RS:0;3609ad07831c:39733-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/9edd584a765d2a226d81ae3095fa4916/C/70d106120a764da5a6f4746be14078fb, hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/9edd584a765d2a226d81ae3095fa4916/C/0b729069623444e4a44757eb00b79c68, hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/9edd584a765d2a226d81ae3095fa4916/C/5788755821084a2091b31a6b1564fe58] into tmpdir=hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/9edd584a765d2a226d81ae3095fa4916/.tmp, totalSize=35.3 K 2024-12-16T17:57:35,744 DEBUG [RS:0;3609ad07831c:39733-shortCompactions-0 {}] compactions.Compactor(224): Compacting 70d106120a764da5a6f4746be14078fb, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=75, earliestPutTs=1734371853039 2024-12-16T17:57:35,744 DEBUG [RS:0;3609ad07831c:39733-shortCompactions-0 {}] compactions.Compactor(224): Compacting 0b729069623444e4a44757eb00b79c68, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=86, earliestPutTs=1734371854195 2024-12-16T17:57:35,745 DEBUG [RS:0;3609ad07831c:39733-shortCompactions-0 {}] compactions.Compactor(224): Compacting 5788755821084a2091b31a6b1564fe58, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=114, earliestPutTs=1734371854632 2024-12-16T17:57:35,752 INFO [RS:0;3609ad07831c:39733-shortCompactions-0 {}] 
throttle.PressureAwareThroughputController(145): 9edd584a765d2a226d81ae3095fa4916#C#compaction#230 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-12-16T17:57:35,753 DEBUG [RS:0;3609ad07831c:39733-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/9edd584a765d2a226d81ae3095fa4916/.tmp/C/792b6d3bdea642cb966aec965256d2c1 is 50, key is test_row_0/C:col10/1734371854636/Put/seqid=0 2024-12-16T17:57:35,766 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39733 {}] regionserver.HRegion(8581): Flush requested on 9edd584a765d2a226d81ae3095fa4916 2024-12-16T17:57:35,766 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 9edd584a765d2a226d81ae3095fa4916 3/3 column families, dataSize=60.38 KB heapSize=158.95 KB 2024-12-16T17:57:35,766 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 9edd584a765d2a226d81ae3095fa4916, store=A 2024-12-16T17:57:35,766 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-16T17:57:35,766 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 9edd584a765d2a226d81ae3095fa4916, store=B 2024-12-16T17:57:35,766 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41817 is added to blk_1073742101_1277 (size=12241) 2024-12-16T17:57:35,766 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-16T17:57:35,766 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 9edd584a765d2a226d81ae3095fa4916, store=C 2024-12-16T17:57:35,767 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-16T17:57:35,772 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/9edd584a765d2a226d81ae3095fa4916/.tmp/A/d9efa881ce15457a830a3cbb6f569400 is 50, key is test_row_0/A:col10/1734371855168/Put/seqid=0 2024-12-16T17:57:35,786 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41817 is added to blk_1073742102_1278 (size=14341) 2024-12-16T17:57:35,786 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=20.13 KB at sequenceid=128 (bloomFilter=true), to=hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/9edd584a765d2a226d81ae3095fa4916/.tmp/A/d9efa881ce15457a830a3cbb6f569400 2024-12-16T17:57:35,798 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/9edd584a765d2a226d81ae3095fa4916/.tmp/B/ccb92aac72354daebd36f8c213020d6a is 50, key is test_row_0/B:col10/1734371855168/Put/seqid=0 2024-12-16T17:57:35,814 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 3609ad07831c,39733,1734371789085 2024-12-16T17:57:35,814 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=39733 {}] 
regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=66 2024-12-16T17:57:35,814 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-1 {event_type=RS_FLUSH_REGIONS, pid=66}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1734371848597.9edd584a765d2a226d81ae3095fa4916. 2024-12-16T17:57:35,814 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-1 {event_type=RS_FLUSH_REGIONS, pid=66}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1734371848597.9edd584a765d2a226d81ae3095fa4916. as already flushing 2024-12-16T17:57:35,815 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-1 {event_type=RS_FLUSH_REGIONS, pid=66}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1734371848597.9edd584a765d2a226d81ae3095fa4916. 2024-12-16T17:57:35,815 ERROR [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-1 {event_type=RS_FLUSH_REGIONS, pid=66}] handler.RSProcedureHandler(58): pid=66 java.io.IOException: Unable to complete flush {ENCODED => 9edd584a765d2a226d81ae3095fa4916, NAME => 'TestAcidGuarantees,,1734371848597.9edd584a765d2a226d81ae3095fa4916.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-16T17:57:35,815 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-1 {event_type=RS_FLUSH_REGIONS, pid=66}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=66 java.io.IOException: Unable to complete flush {ENCODED => 9edd584a765d2a226d81ae3095fa4916, NAME => 'TestAcidGuarantees,,1734371848597.9edd584a765d2a226d81ae3095fa4916.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
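[Annotation] The RegionTooBusyException warnings that follow are thrown from HRegion.checkResources when a mutation arrives while the region's memstore is already over its blocking limit (512.0 K in this test run). A rough, standalone sketch of that kind of resource check, with hypothetical names rather than the actual HRegion code: reject the write fast and leave it to the caller to retry once a flush has freed memory.

```java
import java.util.concurrent.atomic.AtomicLong;

/** Hypothetical sketch of a memstore blocking check: writes are rejected while
 *  the in-memory size is over a configured limit, as in the warnings below.
 *  Not the real HRegion.checkResources. */
public class MemstoreGuardSketch {

    /** Stand-in for org.apache.hadoop.hbase.RegionTooBusyException. */
    public static class TooBusyException extends Exception {
        public TooBusyException(String msg) { super(msg); }
    }

    private final long blockingLimitBytes;
    private final AtomicLong memstoreSize = new AtomicLong();

    public MemstoreGuardSketch(long blockingLimitBytes) {
        this.blockingLimitBytes = blockingLimitBytes;
    }

    /** Called before applying a mutation. */
    public void checkResources(String regionName) throws TooBusyException {
        if (memstoreSize.get() > blockingLimitBytes) {
            // In the real server a flush would also be requested at this point.
            throw new TooBusyException("Over memstore limit=" + blockingLimitBytes
                + " bytes, regionName=" + regionName);
        }
    }

    public void add(long bytes) { memstoreSize.addAndGet(bytes); }
    public void flushed()       { memstoreSize.set(0); }

    public static void main(String[] args) throws Exception {
        MemstoreGuardSketch guard = new MemstoreGuardSketch(512 * 1024);
        guard.add(600 * 1024);                       // over the 512 KB limit
        try {
            guard.checkResources("9edd584a765d2a226d81ae3095fa4916");
        } catch (TooBusyException e) {
            System.out.println("rejected: " + e.getMessage());
        }
    }
}
```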
2024-12-16T17:57:35,815 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38367 {}] master.HMaster(4114): Remote procedure failed, pid=66 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 9edd584a765d2a226d81ae3095fa4916, NAME => 'TestAcidGuarantees,,1734371848597.9edd584a765d2a226d81ae3095fa4916.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 9edd584a765d2a226d81ae3095fa4916, NAME => 'TestAcidGuarantees,,1734371848597.9edd584a765d2a226d81ae3095fa4916.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-16T17:57:35,817 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39733 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9edd584a765d2a226d81ae3095fa4916, server=3609ad07831c,39733,1734371789085 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-16T17:57:35,817 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39733 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9edd584a765d2a226d81ae3095fa4916, server=3609ad07831c,39733,1734371789085 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-16T17:57:35,818 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39733 {}] ipc.CallRunner(138): callId: 23 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45484 deadline: 1734371915813, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9edd584a765d2a226d81ae3095fa4916, server=3609ad07831c,39733,1734371789085 2024-12-16T17:57:35,818 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39733 {}] ipc.CallRunner(138): callId: 23 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45486 deadline: 1734371915813, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9edd584a765d2a226d81ae3095fa4916, server=3609ad07831c,39733,1734371789085 2024-12-16T17:57:35,818 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41817 is added to blk_1073742103_1279 (size=12001) 2024-12-16T17:57:35,818 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39733 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9edd584a765d2a226d81ae3095fa4916, server=3609ad07831c,39733,1734371789085 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-16T17:57:35,818 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39733 {}] ipc.CallRunner(138): callId: 40 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45454 deadline: 1734371915815, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9edd584a765d2a226d81ae3095fa4916, server=3609ad07831c,39733,1734371789085 2024-12-16T17:57:35,818 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39733 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9edd584a765d2a226d81ae3095fa4916, server=3609ad07831c,39733,1734371789085 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-16T17:57:35,818 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39733 {}] ipc.CallRunner(138): callId: 113 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45532 deadline: 1734371915816, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9edd584a765d2a226d81ae3095fa4916, server=3609ad07831c,39733,1734371789085 2024-12-16T17:57:35,818 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39733 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9edd584a765d2a226d81ae3095fa4916, server=3609ad07831c,39733,1734371789085 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-16T17:57:35,818 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39733 {}] ipc.CallRunner(138): callId: 40 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45446 deadline: 1734371915817, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9edd584a765d2a226d81ae3095fa4916, server=3609ad07831c,39733,1734371789085 2024-12-16T17:57:35,921 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39733 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9edd584a765d2a226d81ae3095fa4916, server=3609ad07831c,39733,1734371789085 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-16T17:57:35,921 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39733 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9edd584a765d2a226d81ae3095fa4916, server=3609ad07831c,39733,1734371789085 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-16T17:57:35,921 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39733 {}] ipc.CallRunner(138): callId: 25 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45484 deadline: 1734371915919, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9edd584a765d2a226d81ae3095fa4916, server=3609ad07831c,39733,1734371789085 2024-12-16T17:57:35,921 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39733 {}] ipc.CallRunner(138): callId: 42 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45454 deadline: 1734371915919, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9edd584a765d2a226d81ae3095fa4916, server=3609ad07831c,39733,1734371789085 2024-12-16T17:57:35,921 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39733 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9edd584a765d2a226d81ae3095fa4916, server=3609ad07831c,39733,1734371789085 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-16T17:57:35,921 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39733 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9edd584a765d2a226d81ae3095fa4916, server=3609ad07831c,39733,1734371789085 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-16T17:57:35,921 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39733 {}] ipc.CallRunner(138): callId: 115 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45532 deadline: 1734371915919, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9edd584a765d2a226d81ae3095fa4916, server=3609ad07831c,39733,1734371789085 2024-12-16T17:57:35,921 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39733 {}] ipc.CallRunner(138): callId: 25 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45486 deadline: 1734371915919, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9edd584a765d2a226d81ae3095fa4916, server=3609ad07831c,39733,1734371789085 2024-12-16T17:57:35,921 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39733 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9edd584a765d2a226d81ae3095fa4916, server=3609ad07831c,39733,1734371789085 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-16T17:57:35,921 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39733 {}] ipc.CallRunner(138): callId: 42 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45446 deadline: 1734371915919, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9edd584a765d2a226d81ae3095fa4916, server=3609ad07831c,39733,1734371789085 2024-12-16T17:57:35,966 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 3609ad07831c,39733,1734371789085 2024-12-16T17:57:35,967 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=39733 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=66 2024-12-16T17:57:35,967 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-2 {event_type=RS_FLUSH_REGIONS, pid=66}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1734371848597.9edd584a765d2a226d81ae3095fa4916. 2024-12-16T17:57:35,967 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-2 {event_type=RS_FLUSH_REGIONS, pid=66}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1734371848597.9edd584a765d2a226d81ae3095fa4916. as already flushing 2024-12-16T17:57:35,967 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-2 {event_type=RS_FLUSH_REGIONS, pid=66}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1734371848597.9edd584a765d2a226d81ae3095fa4916. 2024-12-16T17:57:35,967 ERROR [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-2 {event_type=RS_FLUSH_REGIONS, pid=66}] handler.RSProcedureHandler(58): pid=66 java.io.IOException: Unable to complete flush {ENCODED => 9edd584a765d2a226d81ae3095fa4916, NAME => 'TestAcidGuarantees,,1734371848597.9edd584a765d2a226d81ae3095fa4916.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] 
at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-16T17:57:35,967 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-2 {event_type=RS_FLUSH_REGIONS, pid=66}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=66 java.io.IOException: Unable to complete flush {ENCODED => 9edd584a765d2a226d81ae3095fa4916, NAME => 'TestAcidGuarantees,,1734371848597.9edd584a765d2a226d81ae3095fa4916.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-16T17:57:35,968 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38367 {}] master.HMaster(4114): Remote procedure failed, pid=66 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 9edd584a765d2a226d81ae3095fa4916, NAME => 'TestAcidGuarantees,,1734371848597.9edd584a765d2a226d81ae3095fa4916.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 9edd584a765d2a226d81ae3095fa4916, NAME => 'TestAcidGuarantees,,1734371848597.9edd584a765d2a226d81ae3095fa4916.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-16T17:57:36,119 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 3609ad07831c,39733,1734371789085 2024-12-16T17:57:36,119 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=39733 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=66 2024-12-16T17:57:36,120 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-0 {event_type=RS_FLUSH_REGIONS, pid=66}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1734371848597.9edd584a765d2a226d81ae3095fa4916. 2024-12-16T17:57:36,120 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-0 {event_type=RS_FLUSH_REGIONS, pid=66}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1734371848597.9edd584a765d2a226d81ae3095fa4916. as already flushing 2024-12-16T17:57:36,120 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-0 {event_type=RS_FLUSH_REGIONS, pid=66}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1734371848597.9edd584a765d2a226d81ae3095fa4916. 2024-12-16T17:57:36,120 ERROR [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-0 {event_type=RS_FLUSH_REGIONS, pid=66}] handler.RSProcedureHandler(58): pid=66 java.io.IOException: Unable to complete flush {ENCODED => 9edd584a765d2a226d81ae3095fa4916, NAME => 'TestAcidGuarantees,,1734371848597.9edd584a765d2a226d81ae3095fa4916.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-16T17:57:36,120 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-0 {event_type=RS_FLUSH_REGIONS, pid=66}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=66 java.io.IOException: Unable to complete flush {ENCODED => 9edd584a765d2a226d81ae3095fa4916, NAME => 'TestAcidGuarantees,,1734371848597.9edd584a765d2a226d81ae3095fa4916.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-16T17:57:36,120 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38367 {}] master.HMaster(4114): Remote procedure failed, pid=66 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 9edd584a765d2a226d81ae3095fa4916, NAME => 'TestAcidGuarantees,,1734371848597.9edd584a765d2a226d81ae3095fa4916.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 9edd584a765d2a226d81ae3095fa4916, NAME => 'TestAcidGuarantees,,1734371848597.9edd584a765d2a226d81ae3095fa4916.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-16T17:57:36,123 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39733 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9edd584a765d2a226d81ae3095fa4916, server=3609ad07831c,39733,1734371789085 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-16T17:57:36,123 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39733 {}] ipc.CallRunner(138): callId: 44 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45454 deadline: 1734371916122, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9edd584a765d2a226d81ae3095fa4916, server=3609ad07831c,39733,1734371789085 2024-12-16T17:57:36,123 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39733 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9edd584a765d2a226d81ae3095fa4916, server=3609ad07831c,39733,1734371789085 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-16T17:57:36,124 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39733 {}] ipc.CallRunner(138): callId: 27 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45484 deadline: 1734371916123, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9edd584a765d2a226d81ae3095fa4916, server=3609ad07831c,39733,1734371789085 2024-12-16T17:57:36,124 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39733 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9edd584a765d2a226d81ae3095fa4916, server=3609ad07831c,39733,1734371789085 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-16T17:57:36,124 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39733 {}] ipc.CallRunner(138): callId: 27 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45486 deadline: 1734371916123, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9edd584a765d2a226d81ae3095fa4916, server=3609ad07831c,39733,1734371789085 2024-12-16T17:57:36,129 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39733 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9edd584a765d2a226d81ae3095fa4916, server=3609ad07831c,39733,1734371789085 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-16T17:57:36,130 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39733 {}] ipc.CallRunner(138): callId: 117 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45532 deadline: 1734371916129, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9edd584a765d2a226d81ae3095fa4916, server=3609ad07831c,39733,1734371789085 2024-12-16T17:57:36,130 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39733 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9edd584a765d2a226d81ae3095fa4916, server=3609ad07831c,39733,1734371789085 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-16T17:57:36,130 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39733 {}] ipc.CallRunner(138): callId: 44 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45446 deadline: 1734371916129, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9edd584a765d2a226d81ae3095fa4916, server=3609ad07831c,39733,1734371789085 2024-12-16T17:57:36,135 DEBUG [RS:0;3609ad07831c:39733-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/9edd584a765d2a226d81ae3095fa4916/.tmp/B/7a1d7cb97b6f410ba0602eb41f8ba647 as hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/9edd584a765d2a226d81ae3095fa4916/B/7a1d7cb97b6f410ba0602eb41f8ba647 2024-12-16T17:57:36,140 INFO [RS:0;3609ad07831c:39733-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 9edd584a765d2a226d81ae3095fa4916/B of 9edd584a765d2a226d81ae3095fa4916 into 7a1d7cb97b6f410ba0602eb41f8ba647(size=12.0 K), total size for store is 12.0 K. This selection was in queue for 0sec, and took 0sec to execute. 
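[Editor's note] The repeated RegionTooBusyException entries above are the region server failing puts fast while this region's memstore sits above its blocking threshold. The 512.0 K figure is typically the product of the standard settings hbase.hregion.memstore.flush.size and hbase.hregion.memstore.block.multiplier; a minimal sketch of that derivation follows. The class name and the small fallback values are illustrative test-scale assumptions, not values read from this run's hbase-site.xml.

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;

    public class MemstoreBlockingLimit {
        public static void main(String[] args) {
            Configuration conf = HBaseConfiguration.create();
            // Both property names are standard HBase keys; the fallbacks below are
            // hypothetical test-scale numbers chosen so that 128 KB * 4 = 512 KB,
            // matching the "Over memstore limit=512.0 K" seen in this log.
            long flushSize = conf.getLong("hbase.hregion.memstore.flush.size", 128 * 1024L);
            int multiplier = conf.getInt("hbase.hregion.memstore.block.multiplier", 4);
            long blockingLimit = flushSize * multiplier;
            // Above this many bytes of memstore in one region, puts fail fast with
            // RegionTooBusyException until a flush brings the memstore back down.
            System.out.println("Region blocks new writes above ~" + blockingLimit + " bytes");
        }
    }

In normal operation the HBase client treats RegionTooBusyException as retryable, so the callers behind callId 27/44/117 above are presumably just being retried until the flush visible later in this log frees memstore space.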
2024-12-16T17:57:36,140 DEBUG [RS:0;3609ad07831c:39733-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 9edd584a765d2a226d81ae3095fa4916: 2024-12-16T17:57:36,140 INFO [RS:0;3609ad07831c:39733-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1734371848597.9edd584a765d2a226d81ae3095fa4916., storeName=9edd584a765d2a226d81ae3095fa4916/B, priority=13, startTime=1734371855714; duration=0sec 2024-12-16T17:57:36,140 DEBUG [RS:0;3609ad07831c:39733-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-16T17:57:36,140 DEBUG [RS:0;3609ad07831c:39733-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 9edd584a765d2a226d81ae3095fa4916:B 2024-12-16T17:57:36,154 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38367 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=65 2024-12-16T17:57:36,170 DEBUG [RS:0;3609ad07831c:39733-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/9edd584a765d2a226d81ae3095fa4916/.tmp/C/792b6d3bdea642cb966aec965256d2c1 as hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/9edd584a765d2a226d81ae3095fa4916/C/792b6d3bdea642cb966aec965256d2c1 2024-12-16T17:57:36,174 INFO [RS:0;3609ad07831c:39733-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 9edd584a765d2a226d81ae3095fa4916/C of 9edd584a765d2a226d81ae3095fa4916 into 792b6d3bdea642cb966aec965256d2c1(size=12.0 K), total size for store is 12.0 K. This selection was in queue for 0sec, and took 0sec to execute. 
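[Editor's note] The two "Completed compaction" entries above are the long- and short-compaction threads finishing server-scheduled compactions of stores B and C. For reference only, a compaction of the same table can also be requested explicitly through the client Admin API; the sketch below is a hypothetical client-side form and is not necessarily what this test does.

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;
    import org.apache.hadoop.hbase.util.Bytes;

    public class RequestCompaction {
        public static void main(String[] args) throws Exception {
            Configuration conf = HBaseConfiguration.create();
            try (Connection connection = ConnectionFactory.createConnection(conf);
                 Admin admin = connection.getAdmin()) {
                TableName table = TableName.valueOf("TestAcidGuarantees");
                // Request a compaction of a single column family (store B, as in the log entry
                // above); the call is asynchronous and the server still owns file selection.
                admin.compact(table, Bytes.toBytes("B"));
            }
        }
    }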
2024-12-16T17:57:36,174 DEBUG [RS:0;3609ad07831c:39733-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 9edd584a765d2a226d81ae3095fa4916: 2024-12-16T17:57:36,174 INFO [RS:0;3609ad07831c:39733-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1734371848597.9edd584a765d2a226d81ae3095fa4916., storeName=9edd584a765d2a226d81ae3095fa4916/C, priority=13, startTime=1734371855714; duration=0sec 2024-12-16T17:57:36,174 DEBUG [RS:0;3609ad07831c:39733-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-16T17:57:36,174 DEBUG [RS:0;3609ad07831c:39733-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 9edd584a765d2a226d81ae3095fa4916:C 2024-12-16T17:57:36,219 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=20.13 KB at sequenceid=128 (bloomFilter=true), to=hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/9edd584a765d2a226d81ae3095fa4916/.tmp/B/ccb92aac72354daebd36f8c213020d6a 2024-12-16T17:57:36,225 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/9edd584a765d2a226d81ae3095fa4916/.tmp/C/04beb5edf4fb48d9b2c10ca861ad8885 is 50, key is test_row_0/C:col10/1734371855168/Put/seqid=0 2024-12-16T17:57:36,228 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41817 is added to blk_1073742104_1280 (size=12001) 2024-12-16T17:57:36,272 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 3609ad07831c,39733,1734371789085 2024-12-16T17:57:36,272 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=39733 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=66 2024-12-16T17:57:36,272 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-1 {event_type=RS_FLUSH_REGIONS, pid=66}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1734371848597.9edd584a765d2a226d81ae3095fa4916. 2024-12-16T17:57:36,272 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-1 {event_type=RS_FLUSH_REGIONS, pid=66}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1734371848597.9edd584a765d2a226d81ae3095fa4916. as already flushing 2024-12-16T17:57:36,272 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-1 {event_type=RS_FLUSH_REGIONS, pid=66}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1734371848597.9edd584a765d2a226d81ae3095fa4916. 2024-12-16T17:57:36,272 ERROR [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-1 {event_type=RS_FLUSH_REGIONS, pid=66}] handler.RSProcedureHandler(58): pid=66 java.io.IOException: Unable to complete flush {ENCODED => 9edd584a765d2a226d81ae3095fa4916, NAME => 'TestAcidGuarantees,,1734371848597.9edd584a765d2a226d81ae3095fa4916.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-16T17:57:36,273 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-1 {event_type=RS_FLUSH_REGIONS, pid=66}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=66 java.io.IOException: Unable to complete flush {ENCODED => 9edd584a765d2a226d81ae3095fa4916, NAME => 'TestAcidGuarantees,,1734371848597.9edd584a765d2a226d81ae3095fa4916.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-16T17:57:36,273 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38367 {}] master.HMaster(4114): Remote procedure failed, pid=66 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 9edd584a765d2a226d81ae3095fa4916, NAME => 'TestAcidGuarantees,,1734371848597.9edd584a765d2a226d81ae3095fa4916.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 9edd584a765d2a226d81ae3095fa4916, NAME => 'TestAcidGuarantees,,1734371848597.9edd584a765d2a226d81ae3095fa4916.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-16T17:57:36,424 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 3609ad07831c,39733,1734371789085 2024-12-16T17:57:36,424 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=39733 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=66 2024-12-16T17:57:36,425 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-2 {event_type=RS_FLUSH_REGIONS, pid=66}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1734371848597.9edd584a765d2a226d81ae3095fa4916. 2024-12-16T17:57:36,425 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-2 {event_type=RS_FLUSH_REGIONS, pid=66}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1734371848597.9edd584a765d2a226d81ae3095fa4916. as already flushing 2024-12-16T17:57:36,425 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-2 {event_type=RS_FLUSH_REGIONS, pid=66}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1734371848597.9edd584a765d2a226d81ae3095fa4916. 2024-12-16T17:57:36,425 ERROR [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-2 {event_type=RS_FLUSH_REGIONS, pid=66}] handler.RSProcedureHandler(58): pid=66 java.io.IOException: Unable to complete flush {ENCODED => 9edd584a765d2a226d81ae3095fa4916, NAME => 'TestAcidGuarantees,,1734371848597.9edd584a765d2a226d81ae3095fa4916.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-16T17:57:36,425 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-2 {event_type=RS_FLUSH_REGIONS, pid=66}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=66 java.io.IOException: Unable to complete flush {ENCODED => 9edd584a765d2a226d81ae3095fa4916, NAME => 'TestAcidGuarantees,,1734371848597.9edd584a765d2a226d81ae3095fa4916.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-16T17:57:36,426 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38367 {}] master.HMaster(4114): Remote procedure failed, pid=66 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 9edd584a765d2a226d81ae3095fa4916, NAME => 'TestAcidGuarantees,,1734371848597.9edd584a765d2a226d81ae3095fa4916.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 9edd584a765d2a226d81ae3095fa4916, NAME => 'TestAcidGuarantees,,1734371848597.9edd584a765d2a226d81ae3095fa4916.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-16T17:57:36,427 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39733 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9edd584a765d2a226d81ae3095fa4916, server=3609ad07831c,39733,1734371789085 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-16T17:57:36,427 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39733 {}] ipc.CallRunner(138): callId: 29 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45486 deadline: 1734371916425, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9edd584a765d2a226d81ae3095fa4916, server=3609ad07831c,39733,1734371789085 2024-12-16T17:57:36,428 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39733 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9edd584a765d2a226d81ae3095fa4916, server=3609ad07831c,39733,1734371789085 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-16T17:57:36,428 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39733 {}] ipc.CallRunner(138): callId: 29 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45484 deadline: 1734371916427, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9edd584a765d2a226d81ae3095fa4916, server=3609ad07831c,39733,1734371789085 2024-12-16T17:57:36,428 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39733 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9edd584a765d2a226d81ae3095fa4916, server=3609ad07831c,39733,1734371789085 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-16T17:57:36,429 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39733 {}] ipc.CallRunner(138): callId: 46 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45454 deadline: 1734371916427, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9edd584a765d2a226d81ae3095fa4916, server=3609ad07831c,39733,1734371789085 2024-12-16T17:57:36,441 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39733 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9edd584a765d2a226d81ae3095fa4916, server=3609ad07831c,39733,1734371789085 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-16T17:57:36,441 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39733 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9edd584a765d2a226d81ae3095fa4916, server=3609ad07831c,39733,1734371789085 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-16T17:57:36,441 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39733 {}] ipc.CallRunner(138): callId: 46 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45446 deadline: 1734371916441, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9edd584a765d2a226d81ae3095fa4916, server=3609ad07831c,39733,1734371789085 2024-12-16T17:57:36,441 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39733 {}] ipc.CallRunner(138): callId: 119 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45532 deadline: 1734371916441, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9edd584a765d2a226d81ae3095fa4916, server=3609ad07831c,39733,1734371789085 2024-12-16T17:57:36,579 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 3609ad07831c,39733,1734371789085 2024-12-16T17:57:36,580 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=39733 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=66 2024-12-16T17:57:36,580 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-0 {event_type=RS_FLUSH_REGIONS, pid=66}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1734371848597.9edd584a765d2a226d81ae3095fa4916. 2024-12-16T17:57:36,580 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-0 {event_type=RS_FLUSH_REGIONS, pid=66}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1734371848597.9edd584a765d2a226d81ae3095fa4916. as already flushing 2024-12-16T17:57:36,580 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-0 {event_type=RS_FLUSH_REGIONS, pid=66}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1734371848597.9edd584a765d2a226d81ae3095fa4916. 2024-12-16T17:57:36,580 ERROR [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-0 {event_type=RS_FLUSH_REGIONS, pid=66}] handler.RSProcedureHandler(58): pid=66 java.io.IOException: Unable to complete flush {ENCODED => 9edd584a765d2a226d81ae3095fa4916, NAME => 'TestAcidGuarantees,,1734371848597.9edd584a765d2a226d81ae3095fa4916.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-16T17:57:36,580 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-0 {event_type=RS_FLUSH_REGIONS, pid=66}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=66 java.io.IOException: Unable to complete flush {ENCODED => 9edd584a765d2a226d81ae3095fa4916, NAME => 'TestAcidGuarantees,,1734371848597.9edd584a765d2a226d81ae3095fa4916.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-16T17:57:36,581 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38367 {}] master.HMaster(4114): Remote procedure failed, pid=66 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 9edd584a765d2a226d81ae3095fa4916, NAME => 'TestAcidGuarantees,,1734371848597.9edd584a765d2a226d81ae3095fa4916.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 9edd584a765d2a226d81ae3095fa4916, NAME => 'TestAcidGuarantees,,1734371848597.9edd584a765d2a226d81ae3095fa4916.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-16T17:57:36,629 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=20.13 KB at sequenceid=128 (bloomFilter=true), to=hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/9edd584a765d2a226d81ae3095fa4916/.tmp/C/04beb5edf4fb48d9b2c10ca861ad8885 2024-12-16T17:57:36,633 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/9edd584a765d2a226d81ae3095fa4916/.tmp/A/d9efa881ce15457a830a3cbb6f569400 as hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/9edd584a765d2a226d81ae3095fa4916/A/d9efa881ce15457a830a3cbb6f569400 2024-12-16T17:57:36,637 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/9edd584a765d2a226d81ae3095fa4916/A/d9efa881ce15457a830a3cbb6f569400, entries=200, sequenceid=128, filesize=14.0 K 2024-12-16T17:57:36,637 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/9edd584a765d2a226d81ae3095fa4916/.tmp/B/ccb92aac72354daebd36f8c213020d6a as hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/9edd584a765d2a226d81ae3095fa4916/B/ccb92aac72354daebd36f8c213020d6a 2024-12-16T17:57:36,641 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/9edd584a765d2a226d81ae3095fa4916/B/ccb92aac72354daebd36f8c213020d6a, entries=150, 
sequenceid=128, filesize=11.7 K 2024-12-16T17:57:36,642 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/9edd584a765d2a226d81ae3095fa4916/.tmp/C/04beb5edf4fb48d9b2c10ca861ad8885 as hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/9edd584a765d2a226d81ae3095fa4916/C/04beb5edf4fb48d9b2c10ca861ad8885 2024-12-16T17:57:36,646 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/9edd584a765d2a226d81ae3095fa4916/C/04beb5edf4fb48d9b2c10ca861ad8885, entries=150, sequenceid=128, filesize=11.7 K 2024-12-16T17:57:36,647 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~60.38 KB/61830, heapSize ~158.91 KB/162720, currentSize=140.89 KB/144270 for 9edd584a765d2a226d81ae3095fa4916 in 881ms, sequenceid=128, compaction requested=false 2024-12-16T17:57:36,647 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 9edd584a765d2a226d81ae3095fa4916: 2024-12-16T17:57:36,732 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 3609ad07831c,39733,1734371789085 2024-12-16T17:57:36,733 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=39733 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=66 2024-12-16T17:57:36,733 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-1 {event_type=RS_FLUSH_REGIONS, pid=66}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1734371848597.9edd584a765d2a226d81ae3095fa4916. 
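[Editor's note] The pid=66 entries above are a region-level flush procedure being re-dispatched by the master: each attempt fails fast with "Unable to complete flush ... as already flushing" while the region's own MemStoreFlusher is still running, and the master retries until it can succeed. A table-level flush of this kind is what an Admin#flush call produces (the parent FlushTableProcedure, pid=65, completes further down in this log); the sketch below shows that client-side form under the assumption that the test, or an equivalent caller, issued such a request. The class name is hypothetical.

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;

    public class RequestFlush {
        public static void main(String[] args) throws Exception {
            Configuration conf = HBaseConfiguration.create();
            try (Connection connection = ConnectionFactory.createConnection(conf);
                 Admin admin = connection.getAdmin()) {
                // Asks the master to flush every region of the table; in this log that shows up
                // as a FlushTableProcedure with one FlushRegionProcedure child per region.
                admin.flush(TableName.valueOf("TestAcidGuarantees"));
            }
        }
    }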
2024-12-16T17:57:36,733 INFO [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-1 {event_type=RS_FLUSH_REGIONS, pid=66}] regionserver.HRegion(2837): Flushing 9edd584a765d2a226d81ae3095fa4916 3/3 column families, dataSize=140.89 KB heapSize=369.89 KB 2024-12-16T17:57:36,733 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-1 {event_type=RS_FLUSH_REGIONS, pid=66}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 9edd584a765d2a226d81ae3095fa4916, store=A 2024-12-16T17:57:36,733 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-1 {event_type=RS_FLUSH_REGIONS, pid=66}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-16T17:57:36,733 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-1 {event_type=RS_FLUSH_REGIONS, pid=66}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 9edd584a765d2a226d81ae3095fa4916, store=B 2024-12-16T17:57:36,733 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-1 {event_type=RS_FLUSH_REGIONS, pid=66}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-16T17:57:36,733 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-1 {event_type=RS_FLUSH_REGIONS, pid=66}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 9edd584a765d2a226d81ae3095fa4916, store=C 2024-12-16T17:57:36,733 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-1 {event_type=RS_FLUSH_REGIONS, pid=66}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-16T17:57:36,737 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-1 {event_type=RS_FLUSH_REGIONS, pid=66}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/9edd584a765d2a226d81ae3095fa4916/.tmp/A/0d26f1108c444d76b2ff5bb43c127458 is 50, key is test_row_0/A:col10/1734371855808/Put/seqid=0 2024-12-16T17:57:36,741 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41817 is added to blk_1073742105_1281 (size=12151) 2024-12-16T17:57:36,741 INFO [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-1 {event_type=RS_FLUSH_REGIONS, pid=66}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=46.96 KB at sequenceid=154 (bloomFilter=true), to=hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/9edd584a765d2a226d81ae3095fa4916/.tmp/A/0d26f1108c444d76b2ff5bb43c127458 2024-12-16T17:57:36,755 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-1 {event_type=RS_FLUSH_REGIONS, pid=66}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/9edd584a765d2a226d81ae3095fa4916/.tmp/B/e6d16fe9711740b89e9195b91fe54650 is 50, key is test_row_0/B:col10/1734371855808/Put/seqid=0 2024-12-16T17:57:36,765 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41817 is added to blk_1073742106_1282 (size=12151) 2024-12-16T17:57:36,766 INFO [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-1 {event_type=RS_FLUSH_REGIONS, pid=66}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=46.96 KB at sequenceid=154 (bloomFilter=true), 
to=hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/9edd584a765d2a226d81ae3095fa4916/.tmp/B/e6d16fe9711740b89e9195b91fe54650 2024-12-16T17:57:36,775 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-1 {event_type=RS_FLUSH_REGIONS, pid=66}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/9edd584a765d2a226d81ae3095fa4916/.tmp/C/918470d694f74f93bd48c746f6a8aa1b is 50, key is test_row_0/C:col10/1734371855808/Put/seqid=0 2024-12-16T17:57:36,789 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41817 is added to blk_1073742107_1283 (size=12151) 2024-12-16T17:57:36,933 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39733 {}] regionserver.HRegion(8581): Flush requested on 9edd584a765d2a226d81ae3095fa4916 2024-12-16T17:57:36,933 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1734371848597.9edd584a765d2a226d81ae3095fa4916. as already flushing 2024-12-16T17:57:36,947 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39733 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9edd584a765d2a226d81ae3095fa4916, server=3609ad07831c,39733,1734371789085 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-16T17:57:36,947 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39733 {}] ipc.CallRunner(138): callId: 121 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45532 deadline: 1734371916945, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9edd584a765d2a226d81ae3095fa4916, server=3609ad07831c,39733,1734371789085 2024-12-16T17:57:36,947 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39733 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9edd584a765d2a226d81ae3095fa4916, server=3609ad07831c,39733,1734371789085 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-16T17:57:36,947 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39733 {}] ipc.CallRunner(138): callId: 48 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45446 deadline: 1734371916945, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9edd584a765d2a226d81ae3095fa4916, server=3609ad07831c,39733,1734371789085 2024-12-16T17:57:36,948 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39733 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9edd584a765d2a226d81ae3095fa4916, server=3609ad07831c,39733,1734371789085 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-16T17:57:36,948 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39733 {}] ipc.CallRunner(138): callId: 35 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45486 deadline: 1734371916946, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9edd584a765d2a226d81ae3095fa4916, server=3609ad07831c,39733,1734371789085 2024-12-16T17:57:36,948 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39733 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9edd584a765d2a226d81ae3095fa4916, server=3609ad07831c,39733,1734371789085 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-16T17:57:36,948 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39733 {}] ipc.CallRunner(138): callId: 51 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45454 deadline: 1734371916946, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9edd584a765d2a226d81ae3095fa4916, server=3609ad07831c,39733,1734371789085 2024-12-16T17:57:36,948 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39733 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9edd584a765d2a226d81ae3095fa4916, server=3609ad07831c,39733,1734371789085 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-16T17:57:36,948 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39733 {}] ipc.CallRunner(138): callId: 34 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45484 deadline: 1734371916946, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9edd584a765d2a226d81ae3095fa4916, server=3609ad07831c,39733,1734371789085 2024-12-16T17:57:37,050 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39733 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9edd584a765d2a226d81ae3095fa4916, server=3609ad07831c,39733,1734371789085 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-16T17:57:37,050 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39733 {}] ipc.CallRunner(138): callId: 37 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45486 deadline: 1734371917049, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9edd584a765d2a226d81ae3095fa4916, server=3609ad07831c,39733,1734371789085 2024-12-16T17:57:37,051 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39733 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9edd584a765d2a226d81ae3095fa4916, server=3609ad07831c,39733,1734371789085 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-16T17:57:37,051 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39733 {}] ipc.CallRunner(138): callId: 53 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45454 deadline: 1734371917049, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9edd584a765d2a226d81ae3095fa4916, server=3609ad07831c,39733,1734371789085 2024-12-16T17:57:37,051 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39733 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9edd584a765d2a226d81ae3095fa4916, server=3609ad07831c,39733,1734371789085 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-16T17:57:37,051 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39733 {}] ipc.CallRunner(138): callId: 36 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45484 deadline: 1734371917050, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9edd584a765d2a226d81ae3095fa4916, server=3609ad07831c,39733,1734371789085 2024-12-16T17:57:37,156 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38367 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=65 2024-12-16T17:57:37,190 INFO [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-1 {event_type=RS_FLUSH_REGIONS, pid=66}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=46.96 KB at sequenceid=154 (bloomFilter=true), to=hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/9edd584a765d2a226d81ae3095fa4916/.tmp/C/918470d694f74f93bd48c746f6a8aa1b 2024-12-16T17:57:37,197 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-1 {event_type=RS_FLUSH_REGIONS, pid=66}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/9edd584a765d2a226d81ae3095fa4916/.tmp/A/0d26f1108c444d76b2ff5bb43c127458 as hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/9edd584a765d2a226d81ae3095fa4916/A/0d26f1108c444d76b2ff5bb43c127458 2024-12-16T17:57:37,202 INFO [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-1 {event_type=RS_FLUSH_REGIONS, pid=66}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/9edd584a765d2a226d81ae3095fa4916/A/0d26f1108c444d76b2ff5bb43c127458, entries=150, sequenceid=154, filesize=11.9 K 2024-12-16T17:57:37,203 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-1 {event_type=RS_FLUSH_REGIONS, pid=66}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/9edd584a765d2a226d81ae3095fa4916/.tmp/B/e6d16fe9711740b89e9195b91fe54650 as hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/9edd584a765d2a226d81ae3095fa4916/B/e6d16fe9711740b89e9195b91fe54650 2024-12-16T17:57:37,210 INFO [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-1 {event_type=RS_FLUSH_REGIONS, pid=66}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/9edd584a765d2a226d81ae3095fa4916/B/e6d16fe9711740b89e9195b91fe54650, entries=150, sequenceid=154, filesize=11.9 K 2024-12-16T17:57:37,211 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-1 {event_type=RS_FLUSH_REGIONS, pid=66}] 
regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/9edd584a765d2a226d81ae3095fa4916/.tmp/C/918470d694f74f93bd48c746f6a8aa1b as hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/9edd584a765d2a226d81ae3095fa4916/C/918470d694f74f93bd48c746f6a8aa1b 2024-12-16T17:57:37,219 INFO [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-1 {event_type=RS_FLUSH_REGIONS, pid=66}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/9edd584a765d2a226d81ae3095fa4916/C/918470d694f74f93bd48c746f6a8aa1b, entries=150, sequenceid=154, filesize=11.9 K 2024-12-16T17:57:37,220 INFO [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-1 {event_type=RS_FLUSH_REGIONS, pid=66}] regionserver.HRegion(3040): Finished flush of dataSize ~140.89 KB/144270, heapSize ~369.84 KB/378720, currentSize=67.09 KB/68700 for 9edd584a765d2a226d81ae3095fa4916 in 487ms, sequenceid=154, compaction requested=true 2024-12-16T17:57:37,220 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-1 {event_type=RS_FLUSH_REGIONS, pid=66}] regionserver.HRegion(2538): Flush status journal for 9edd584a765d2a226d81ae3095fa4916: 2024-12-16T17:57:37,220 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-1 {event_type=RS_FLUSH_REGIONS, pid=66}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1734371848597.9edd584a765d2a226d81ae3095fa4916. 2024-12-16T17:57:37,220 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-1 {event_type=RS_FLUSH_REGIONS, pid=66}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=66 2024-12-16T17:57:37,220 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38367 {}] master.HMaster(4106): Remote procedure done, pid=66 2024-12-16T17:57:37,222 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=66, resume processing ppid=65 2024-12-16T17:57:37,222 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=66, ppid=65, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 2.1690 sec 2024-12-16T17:57:37,223 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=65, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=65, table=TestAcidGuarantees in 2.1730 sec 2024-12-16T17:57:37,252 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39733 {}] regionserver.HRegion(8581): Flush requested on 9edd584a765d2a226d81ae3095fa4916 2024-12-16T17:57:37,253 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 9edd584a765d2a226d81ae3095fa4916 3/3 column families, dataSize=73.80 KB heapSize=194.11 KB 2024-12-16T17:57:37,254 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 9edd584a765d2a226d81ae3095fa4916, store=A 2024-12-16T17:57:37,254 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-16T17:57:37,254 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 9edd584a765d2a226d81ae3095fa4916, store=B 2024-12-16T17:57:37,254 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-16T17:57:37,254 DEBUG [MemStoreFlusher.0 {}] 
regionserver.CompactingMemStore(205): FLUSHING TO DISK 9edd584a765d2a226d81ae3095fa4916, store=C 2024-12-16T17:57:37,254 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-16T17:57:37,274 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/9edd584a765d2a226d81ae3095fa4916/.tmp/A/b2c80ed673994ed98011715ef421612c is 50, key is test_row_0/A:col10/1734371857253/Put/seqid=0 2024-12-16T17:57:37,279 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39733 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9edd584a765d2a226d81ae3095fa4916, server=3609ad07831c,39733,1734371789085 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-16T17:57:37,279 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39733 {}] ipc.CallRunner(138): callId: 44 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45484 deadline: 1734371917275, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9edd584a765d2a226d81ae3095fa4916, server=3609ad07831c,39733,1734371789085 2024-12-16T17:57:37,281 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39733 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9edd584a765d2a226d81ae3095fa4916, server=3609ad07831c,39733,1734371789085 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-16T17:57:37,281 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39733 {}] ipc.CallRunner(138): callId: 47 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45486 deadline: 1734371917279, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9edd584a765d2a226d81ae3095fa4916, server=3609ad07831c,39733,1734371789085 2024-12-16T17:57:37,281 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39733 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9edd584a765d2a226d81ae3095fa4916, server=3609ad07831c,39733,1734371789085 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-16T17:57:37,281 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39733 {}] ipc.CallRunner(138): callId: 62 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45454 deadline: 1734371917279, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9edd584a765d2a226d81ae3095fa4916, server=3609ad07831c,39733,1734371789085 2024-12-16T17:57:37,296 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41817 is added to blk_1073742108_1284 (size=9757) 2024-12-16T17:57:37,382 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39733 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9edd584a765d2a226d81ae3095fa4916, server=3609ad07831c,39733,1734371789085 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-16T17:57:37,382 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39733 {}] ipc.CallRunner(138): callId: 46 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45484 deadline: 1734371917380, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9edd584a765d2a226d81ae3095fa4916, server=3609ad07831c,39733,1734371789085 2024-12-16T17:57:37,396 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39733 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9edd584a765d2a226d81ae3095fa4916, server=3609ad07831c,39733,1734371789085 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-16T17:57:37,396 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39733 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9edd584a765d2a226d81ae3095fa4916, server=3609ad07831c,39733,1734371789085 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-16T17:57:37,396 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39733 {}] ipc.CallRunner(138): callId: 64 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45454 deadline: 1734371917395, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9edd584a765d2a226d81ae3095fa4916, server=3609ad07831c,39733,1734371789085 2024-12-16T17:57:37,396 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39733 {}] ipc.CallRunner(138): callId: 49 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45486 deadline: 1734371917395, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9edd584a765d2a226d81ae3095fa4916, server=3609ad07831c,39733,1734371789085 2024-12-16T17:57:37,586 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39733 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9edd584a765d2a226d81ae3095fa4916, server=3609ad07831c,39733,1734371789085 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-16T17:57:37,586 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39733 {}] ipc.CallRunner(138): callId: 48 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45484 deadline: 1734371917584, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9edd584a765d2a226d81ae3095fa4916, server=3609ad07831c,39733,1734371789085 2024-12-16T17:57:37,598 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39733 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9edd584a765d2a226d81ae3095fa4916, server=3609ad07831c,39733,1734371789085 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-16T17:57:37,598 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39733 {}] ipc.CallRunner(138): callId: 66 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45454 deadline: 1734371917597, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9edd584a765d2a226d81ae3095fa4916, server=3609ad07831c,39733,1734371789085 2024-12-16T17:57:37,599 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39733 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9edd584a765d2a226d81ae3095fa4916, server=3609ad07831c,39733,1734371789085 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-16T17:57:37,599 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39733 {}] ipc.CallRunner(138): callId: 51 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45486 deadline: 1734371917598, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9edd584a765d2a226d81ae3095fa4916, server=3609ad07831c,39733,1734371789085 2024-12-16T17:57:37,696 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=26.84 KB at sequenceid=169 (bloomFilter=true), to=hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/9edd584a765d2a226d81ae3095fa4916/.tmp/A/b2c80ed673994ed98011715ef421612c 2024-12-16T17:57:37,712 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/9edd584a765d2a226d81ae3095fa4916/.tmp/B/4d4f4c8649454fe49b6563f5e0a47a70 is 50, key is test_row_0/B:col10/1734371857253/Put/seqid=0 2024-12-16T17:57:37,715 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41817 is added to blk_1073742109_1285 (size=9757) 2024-12-16T17:57:37,889 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39733 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9edd584a765d2a226d81ae3095fa4916, server=3609ad07831c,39733,1734371789085 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-16T17:57:37,889 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39733 {}] ipc.CallRunner(138): callId: 50 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45484 deadline: 1734371917888, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9edd584a765d2a226d81ae3095fa4916, server=3609ad07831c,39733,1734371789085 2024-12-16T17:57:37,902 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39733 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9edd584a765d2a226d81ae3095fa4916, server=3609ad07831c,39733,1734371789085 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-16T17:57:37,902 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39733 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9edd584a765d2a226d81ae3095fa4916, server=3609ad07831c,39733,1734371789085 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-16T17:57:37,902 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39733 {}] ipc.CallRunner(138): callId: 68 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45454 deadline: 1734371917900, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9edd584a765d2a226d81ae3095fa4916, server=3609ad07831c,39733,1734371789085 2024-12-16T17:57:37,902 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39733 {}] ipc.CallRunner(138): callId: 53 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45486 deadline: 1734371917900, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9edd584a765d2a226d81ae3095fa4916, server=3609ad07831c,39733,1734371789085 2024-12-16T17:57:37,949 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39733 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9edd584a765d2a226d81ae3095fa4916, server=3609ad07831c,39733,1734371789085 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-16T17:57:37,950 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39733 {}] ipc.CallRunner(138): callId: 123 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45532 deadline: 1734371917949, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9edd584a765d2a226d81ae3095fa4916, server=3609ad07831c,39733,1734371789085 2024-12-16T17:57:37,958 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39733 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9edd584a765d2a226d81ae3095fa4916, server=3609ad07831c,39733,1734371789085 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-16T17:57:37,958 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39733 {}] ipc.CallRunner(138): callId: 50 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45446 deadline: 1734371917957, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9edd584a765d2a226d81ae3095fa4916, server=3609ad07831c,39733,1734371789085 2024-12-16T17:57:38,116 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=26.84 KB at sequenceid=169 (bloomFilter=true), to=hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/9edd584a765d2a226d81ae3095fa4916/.tmp/B/4d4f4c8649454fe49b6563f5e0a47a70 2024-12-16T17:57:38,122 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/9edd584a765d2a226d81ae3095fa4916/.tmp/C/5a54cfce3eb040778deda37272c5a79b is 50, key is test_row_0/C:col10/1734371857253/Put/seqid=0 2024-12-16T17:57:38,125 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41817 is added to blk_1073742110_1286 (size=9757) 2024-12-16T17:57:38,391 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39733 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9edd584a765d2a226d81ae3095fa4916, server=3609ad07831c,39733,1734371789085 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-16T17:57:38,391 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39733 {}] ipc.CallRunner(138): callId: 52 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45484 deadline: 1734371918390, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9edd584a765d2a226d81ae3095fa4916, server=3609ad07831c,39733,1734371789085 2024-12-16T17:57:38,405 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39733 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9edd584a765d2a226d81ae3095fa4916, server=3609ad07831c,39733,1734371789085 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-16T17:57:38,405 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39733 {}] ipc.CallRunner(138): callId: 70 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45454 deadline: 1734371918404, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9edd584a765d2a226d81ae3095fa4916, server=3609ad07831c,39733,1734371789085 2024-12-16T17:57:38,405 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39733 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9edd584a765d2a226d81ae3095fa4916, server=3609ad07831c,39733,1734371789085 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-16T17:57:38,405 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39733 {}] ipc.CallRunner(138): callId: 55 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45486 deadline: 1734371918404, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9edd584a765d2a226d81ae3095fa4916, server=3609ad07831c,39733,1734371789085 2024-12-16T17:57:38,525 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=26.84 KB at sequenceid=169 (bloomFilter=true), to=hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/9edd584a765d2a226d81ae3095fa4916/.tmp/C/5a54cfce3eb040778deda37272c5a79b 2024-12-16T17:57:38,530 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/9edd584a765d2a226d81ae3095fa4916/.tmp/A/b2c80ed673994ed98011715ef421612c as hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/9edd584a765d2a226d81ae3095fa4916/A/b2c80ed673994ed98011715ef421612c 2024-12-16T17:57:38,534 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/9edd584a765d2a226d81ae3095fa4916/A/b2c80ed673994ed98011715ef421612c, entries=100, sequenceid=169, filesize=9.5 K 2024-12-16T17:57:38,534 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/9edd584a765d2a226d81ae3095fa4916/.tmp/B/4d4f4c8649454fe49b6563f5e0a47a70 as hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/9edd584a765d2a226d81ae3095fa4916/B/4d4f4c8649454fe49b6563f5e0a47a70 2024-12-16T17:57:38,542 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/9edd584a765d2a226d81ae3095fa4916/B/4d4f4c8649454fe49b6563f5e0a47a70, entries=100, sequenceid=169, filesize=9.5 K 2024-12-16T17:57:38,543 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing 
hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/9edd584a765d2a226d81ae3095fa4916/.tmp/C/5a54cfce3eb040778deda37272c5a79b as hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/9edd584a765d2a226d81ae3095fa4916/C/5a54cfce3eb040778deda37272c5a79b 2024-12-16T17:57:38,548 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/9edd584a765d2a226d81ae3095fa4916/C/5a54cfce3eb040778deda37272c5a79b, entries=100, sequenceid=169, filesize=9.5 K 2024-12-16T17:57:38,549 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~80.51 KB/82440, heapSize ~211.64 KB/216720, currentSize=127.47 KB/130530 for 9edd584a765d2a226d81ae3095fa4916 in 1296ms, sequenceid=169, compaction requested=true 2024-12-16T17:57:38,549 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 9edd584a765d2a226d81ae3095fa4916: 2024-12-16T17:57:38,549 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 9edd584a765d2a226d81ae3095fa4916:A, priority=-2147483648, current under compaction store size is 1 2024-12-16T17:57:38,549 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-16T17:57:38,549 DEBUG [RS:0;3609ad07831c:39733-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 4 store files, 0 compacting, 4 eligible, 16 blocking 2024-12-16T17:57:38,549 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 9edd584a765d2a226d81ae3095fa4916:B, priority=-2147483648, current under compaction store size is 2 2024-12-16T17:57:38,549 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-16T17:57:38,549 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 9edd584a765d2a226d81ae3095fa4916:C, priority=-2147483648, current under compaction store size is 3 2024-12-16T17:57:38,549 DEBUG [RS:0;3609ad07831c:39733-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 4 store files, 0 compacting, 4 eligible, 16 blocking 2024-12-16T17:57:38,549 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-16T17:57:38,550 DEBUG [RS:0;3609ad07831c:39733-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 4 files of size 48490 starting at candidate #0 after considering 3 permutations with 3 in ratio 2024-12-16T17:57:38,550 DEBUG [RS:0;3609ad07831c:39733-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 4 files of size 46150 starting at candidate #0 after considering 3 permutations with 3 in ratio 2024-12-16T17:57:38,550 DEBUG [RS:0;3609ad07831c:39733-shortCompactions-0 {}] regionserver.HStore(1540): 9edd584a765d2a226d81ae3095fa4916/A is initiating minor compaction (all files) 2024-12-16T17:57:38,550 DEBUG 
[RS:0;3609ad07831c:39733-longCompactions-0 {}] regionserver.HStore(1540): 9edd584a765d2a226d81ae3095fa4916/B is initiating minor compaction (all files) 2024-12-16T17:57:38,550 INFO [RS:0;3609ad07831c:39733-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 9edd584a765d2a226d81ae3095fa4916/A in TestAcidGuarantees,,1734371848597.9edd584a765d2a226d81ae3095fa4916. 2024-12-16T17:57:38,550 INFO [RS:0;3609ad07831c:39733-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 9edd584a765d2a226d81ae3095fa4916/B in TestAcidGuarantees,,1734371848597.9edd584a765d2a226d81ae3095fa4916. 2024-12-16T17:57:38,551 INFO [RS:0;3609ad07831c:39733-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/9edd584a765d2a226d81ae3095fa4916/B/7a1d7cb97b6f410ba0602eb41f8ba647, hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/9edd584a765d2a226d81ae3095fa4916/B/ccb92aac72354daebd36f8c213020d6a, hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/9edd584a765d2a226d81ae3095fa4916/B/e6d16fe9711740b89e9195b91fe54650, hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/9edd584a765d2a226d81ae3095fa4916/B/4d4f4c8649454fe49b6563f5e0a47a70] into tmpdir=hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/9edd584a765d2a226d81ae3095fa4916/.tmp, totalSize=45.1 K 2024-12-16T17:57:38,551 INFO [RS:0;3609ad07831c:39733-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/9edd584a765d2a226d81ae3095fa4916/A/0b4ad3bd36414bdf9dea359392699961, hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/9edd584a765d2a226d81ae3095fa4916/A/d9efa881ce15457a830a3cbb6f569400, hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/9edd584a765d2a226d81ae3095fa4916/A/0d26f1108c444d76b2ff5bb43c127458, hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/9edd584a765d2a226d81ae3095fa4916/A/b2c80ed673994ed98011715ef421612c] into tmpdir=hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/9edd584a765d2a226d81ae3095fa4916/.tmp, totalSize=47.4 K 2024-12-16T17:57:38,551 DEBUG [RS:0;3609ad07831c:39733-longCompactions-0 {}] compactions.Compactor(224): Compacting 7a1d7cb97b6f410ba0602eb41f8ba647, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=114, earliestPutTs=1734371854632 2024-12-16T17:57:38,551 DEBUG [RS:0;3609ad07831c:39733-shortCompactions-0 {}] compactions.Compactor(224): Compacting 0b4ad3bd36414bdf9dea359392699961, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=114, earliestPutTs=1734371854632 2024-12-16T17:57:38,551 DEBUG [RS:0;3609ad07831c:39733-longCompactions-0 {}] compactions.Compactor(224): Compacting ccb92aac72354daebd36f8c213020d6a, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=128, earliestPutTs=1734371855168 2024-12-16T17:57:38,551 DEBUG 
[RS:0;3609ad07831c:39733-shortCompactions-0 {}] compactions.Compactor(224): Compacting d9efa881ce15457a830a3cbb6f569400, keycount=200, bloomtype=ROW, size=14.0 K, encoding=NONE, compression=NONE, seqNum=128, earliestPutTs=1734371855168 2024-12-16T17:57:38,552 DEBUG [RS:0;3609ad07831c:39733-longCompactions-0 {}] compactions.Compactor(224): Compacting e6d16fe9711740b89e9195b91fe54650, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=154, earliestPutTs=1734371855808 2024-12-16T17:57:38,552 DEBUG [RS:0;3609ad07831c:39733-shortCompactions-0 {}] compactions.Compactor(224): Compacting 0d26f1108c444d76b2ff5bb43c127458, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=154, earliestPutTs=1734371855808 2024-12-16T17:57:38,552 DEBUG [RS:0;3609ad07831c:39733-longCompactions-0 {}] compactions.Compactor(224): Compacting 4d4f4c8649454fe49b6563f5e0a47a70, keycount=100, bloomtype=ROW, size=9.5 K, encoding=NONE, compression=NONE, seqNum=169, earliestPutTs=1734371856940 2024-12-16T17:57:38,552 DEBUG [RS:0;3609ad07831c:39733-shortCompactions-0 {}] compactions.Compactor(224): Compacting b2c80ed673994ed98011715ef421612c, keycount=100, bloomtype=ROW, size=9.5 K, encoding=NONE, compression=NONE, seqNum=169, earliestPutTs=1734371856940 2024-12-16T17:57:38,561 INFO [RS:0;3609ad07831c:39733-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 9edd584a765d2a226d81ae3095fa4916#A#compaction#240 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 1 active operations remaining, total limit is 50.00 MB/second 2024-12-16T17:57:38,561 INFO [RS:0;3609ad07831c:39733-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 9edd584a765d2a226d81ae3095fa4916#B#compaction#241 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-12-16T17:57:38,562 DEBUG [RS:0;3609ad07831c:39733-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/9edd584a765d2a226d81ae3095fa4916/.tmp/B/83a748fc859349c3a759c32466de4793 is 50, key is test_row_0/B:col10/1734371857253/Put/seqid=0 2024-12-16T17:57:38,562 DEBUG [RS:0;3609ad07831c:39733-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/9edd584a765d2a226d81ae3095fa4916/.tmp/A/5147d5cddff041fab1c52700c0422ef0 is 50, key is test_row_0/A:col10/1734371857253/Put/seqid=0 2024-12-16T17:57:38,598 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41817 is added to blk_1073742112_1288 (size=12527) 2024-12-16T17:57:38,598 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41817 is added to blk_1073742111_1287 (size=12527) 2024-12-16T17:57:38,762 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_default_table_TestAcidGuarantees 2024-12-16T17:57:38,762 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=TableRequests_Namespace_default_table_TestAcidGuarantees Metrics about Tables on a single HBase RegionServer 2024-12-16T17:57:39,006 DEBUG [RS:0;3609ad07831c:39733-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/9edd584a765d2a226d81ae3095fa4916/.tmp/A/5147d5cddff041fab1c52700c0422ef0 as hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/9edd584a765d2a226d81ae3095fa4916/A/5147d5cddff041fab1c52700c0422ef0 2024-12-16T17:57:39,007 DEBUG [RS:0;3609ad07831c:39733-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/9edd584a765d2a226d81ae3095fa4916/.tmp/B/83a748fc859349c3a759c32466de4793 as hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/9edd584a765d2a226d81ae3095fa4916/B/83a748fc859349c3a759c32466de4793 2024-12-16T17:57:39,018 INFO [RS:0;3609ad07831c:39733-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 4 (all) file(s) in 9edd584a765d2a226d81ae3095fa4916/B of 9edd584a765d2a226d81ae3095fa4916 into 83a748fc859349c3a759c32466de4793(size=12.2 K), total size for store is 12.2 K. This selection was in queue for 0sec, and took 0sec to execute. 
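
The recurring RegionTooBusyException entries above ("Over memstore limit=512.0 K") are thrown by HRegion.checkResources() once a region's memstore passes its blocking limit, i.e. the memstore flush size times the block multiplier. The sketch below shows how a test might shrink those settings so writers hit that limit quickly; the concrete values (128 KB flush size, multiplier 4) and the class name are assumptions for illustration, not the settings actually used by this run.

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;

    public class SmallMemstoreLimitSketch {
        public static void main(String[] args) {
            // Hypothetical settings; the log does not show the test's actual values.
            Configuration conf = HBaseConfiguration.create();
            // Ask for a memstore flush once a region holds ~128 KB of data ...
            conf.setLong("hbase.hregion.memstore.flush.size", 128 * 1024);
            // ... and block further writes (RegionTooBusyException) at
            // flush.size * multiplier = 512 KB, matching "Over memstore limit=512.0 K".
            conf.setInt("hbase.hregion.memstore.block.multiplier", 4);
            long blockingLimit =
                conf.getLong("hbase.hregion.memstore.flush.size", 0)
                    * conf.getInt("hbase.hregion.memstore.block.multiplier", 4);
            System.out.println("assumed blocking limit = " + blockingLimit + " bytes");
        }
    }

Under these assumed settings, writes that arrive faster than the flusher can drain the memstore are rejected with RegionTooBusyException and retried by the client, which is the pattern visible in the repeated Mutate callId entries above.
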
2024-12-16T17:57:39,019 DEBUG [RS:0;3609ad07831c:39733-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 9edd584a765d2a226d81ae3095fa4916: 2024-12-16T17:57:39,019 INFO [RS:0;3609ad07831c:39733-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1734371848597.9edd584a765d2a226d81ae3095fa4916., storeName=9edd584a765d2a226d81ae3095fa4916/B, priority=12, startTime=1734371858549; duration=0sec 2024-12-16T17:57:39,019 DEBUG [RS:0;3609ad07831c:39733-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-16T17:57:39,019 DEBUG [RS:0;3609ad07831c:39733-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 9edd584a765d2a226d81ae3095fa4916:B 2024-12-16T17:57:39,019 DEBUG [RS:0;3609ad07831c:39733-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 4 store files, 0 compacting, 4 eligible, 16 blocking 2024-12-16T17:57:39,020 INFO [RS:0;3609ad07831c:39733-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 4 (all) file(s) in 9edd584a765d2a226d81ae3095fa4916/A of 9edd584a765d2a226d81ae3095fa4916 into 5147d5cddff041fab1c52700c0422ef0(size=12.2 K), total size for store is 12.2 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-12-16T17:57:39,020 DEBUG [RS:0;3609ad07831c:39733-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 9edd584a765d2a226d81ae3095fa4916: 2024-12-16T17:57:39,020 INFO [RS:0;3609ad07831c:39733-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1734371848597.9edd584a765d2a226d81ae3095fa4916., storeName=9edd584a765d2a226d81ae3095fa4916/A, priority=12, startTime=1734371858549; duration=0sec 2024-12-16T17:57:39,020 DEBUG [RS:0;3609ad07831c:39733-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-16T17:57:39,020 DEBUG [RS:0;3609ad07831c:39733-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 9edd584a765d2a226d81ae3095fa4916:A 2024-12-16T17:57:39,022 DEBUG [RS:0;3609ad07831c:39733-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 4 files of size 46150 starting at candidate #0 after considering 3 permutations with 3 in ratio 2024-12-16T17:57:39,022 DEBUG [RS:0;3609ad07831c:39733-longCompactions-0 {}] regionserver.HStore(1540): 9edd584a765d2a226d81ae3095fa4916/C is initiating minor compaction (all files) 2024-12-16T17:57:39,023 INFO [RS:0;3609ad07831c:39733-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 9edd584a765d2a226d81ae3095fa4916/C in TestAcidGuarantees,,1734371848597.9edd584a765d2a226d81ae3095fa4916. 
2024-12-16T17:57:39,023 INFO [RS:0;3609ad07831c:39733-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/9edd584a765d2a226d81ae3095fa4916/C/792b6d3bdea642cb966aec965256d2c1, hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/9edd584a765d2a226d81ae3095fa4916/C/04beb5edf4fb48d9b2c10ca861ad8885, hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/9edd584a765d2a226d81ae3095fa4916/C/918470d694f74f93bd48c746f6a8aa1b, hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/9edd584a765d2a226d81ae3095fa4916/C/5a54cfce3eb040778deda37272c5a79b] into tmpdir=hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/9edd584a765d2a226d81ae3095fa4916/.tmp, totalSize=45.1 K 2024-12-16T17:57:39,024 DEBUG [RS:0;3609ad07831c:39733-longCompactions-0 {}] compactions.Compactor(224): Compacting 792b6d3bdea642cb966aec965256d2c1, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=114, earliestPutTs=1734371854632 2024-12-16T17:57:39,025 DEBUG [RS:0;3609ad07831c:39733-longCompactions-0 {}] compactions.Compactor(224): Compacting 04beb5edf4fb48d9b2c10ca861ad8885, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=128, earliestPutTs=1734371855168 2024-12-16T17:57:39,025 DEBUG [RS:0;3609ad07831c:39733-longCompactions-0 {}] compactions.Compactor(224): Compacting 918470d694f74f93bd48c746f6a8aa1b, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=154, earliestPutTs=1734371855808 2024-12-16T17:57:39,027 DEBUG [RS:0;3609ad07831c:39733-longCompactions-0 {}] compactions.Compactor(224): Compacting 5a54cfce3eb040778deda37272c5a79b, keycount=100, bloomtype=ROW, size=9.5 K, encoding=NONE, compression=NONE, seqNum=169, earliestPutTs=1734371856940 2024-12-16T17:57:39,040 INFO [RS:0;3609ad07831c:39733-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 9edd584a765d2a226d81ae3095fa4916#C#compaction#242 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-12-16T17:57:39,040 DEBUG [RS:0;3609ad07831c:39733-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/9edd584a765d2a226d81ae3095fa4916/.tmp/C/5606ff8b0bff4b6ba9335f56cfb00bc3 is 50, key is test_row_0/C:col10/1734371857253/Put/seqid=0 2024-12-16T17:57:39,054 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41817 is added to blk_1073742113_1289 (size=12527) 2024-12-16T17:57:39,156 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38367 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=65 2024-12-16T17:57:39,157 INFO [Thread-1171 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 65 completed 2024-12-16T17:57:39,158 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38367 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-12-16T17:57:39,158 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38367 {}] procedure2.ProcedureExecutor(1098): Stored pid=67, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=67, table=TestAcidGuarantees 2024-12-16T17:57:39,159 INFO [PEWorker-3 {}] procedure.FlushTableProcedure(91): pid=67, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=67, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-12-16T17:57:39,160 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38367 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=67 2024-12-16T17:57:39,160 INFO [PEWorker-3 {}] procedure.FlushTableProcedure(91): pid=67, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=67, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-12-16T17:57:39,160 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=68, ppid=67, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-12-16T17:57:39,261 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38367 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=67 2024-12-16T17:57:39,311 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 3609ad07831c,39733,1734371789085 2024-12-16T17:57:39,316 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=39733 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=68 2024-12-16T17:57:39,317 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-2 {event_type=RS_FLUSH_REGIONS, pid=68}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1734371848597.9edd584a765d2a226d81ae3095fa4916. 
2024-12-16T17:57:39,317 INFO [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-2 {event_type=RS_FLUSH_REGIONS, pid=68}] regionserver.HRegion(2837): Flushing 9edd584a765d2a226d81ae3095fa4916 3/3 column families, dataSize=127.47 KB heapSize=334.73 KB 2024-12-16T17:57:39,317 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-2 {event_type=RS_FLUSH_REGIONS, pid=68}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 9edd584a765d2a226d81ae3095fa4916, store=A 2024-12-16T17:57:39,317 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-2 {event_type=RS_FLUSH_REGIONS, pid=68}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-16T17:57:39,317 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-2 {event_type=RS_FLUSH_REGIONS, pid=68}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 9edd584a765d2a226d81ae3095fa4916, store=B 2024-12-16T17:57:39,317 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-2 {event_type=RS_FLUSH_REGIONS, pid=68}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-16T17:57:39,318 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-2 {event_type=RS_FLUSH_REGIONS, pid=68}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 9edd584a765d2a226d81ae3095fa4916, store=C 2024-12-16T17:57:39,318 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-2 {event_type=RS_FLUSH_REGIONS, pid=68}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-16T17:57:39,325 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-2 {event_type=RS_FLUSH_REGIONS, pid=68}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/9edd584a765d2a226d81ae3095fa4916/.tmp/A/6289fec02d764918a743798704c5d3e6 is 50, key is test_row_0/A:col10/1734371857278/Put/seqid=0 2024-12-16T17:57:39,331 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41817 is added to blk_1073742114_1290 (size=12151) 2024-12-16T17:57:39,333 INFO [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-2 {event_type=RS_FLUSH_REGIONS, pid=68}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=42.49 KB at sequenceid=193 (bloomFilter=true), to=hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/9edd584a765d2a226d81ae3095fa4916/.tmp/A/6289fec02d764918a743798704c5d3e6 2024-12-16T17:57:39,354 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-2 {event_type=RS_FLUSH_REGIONS, pid=68}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/9edd584a765d2a226d81ae3095fa4916/.tmp/B/798b1d134cbc4b8aa92704ae49cd3d3f is 50, key is test_row_0/B:col10/1734371857278/Put/seqid=0 2024-12-16T17:57:39,371 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41817 is added to blk_1073742115_1291 (size=12151) 2024-12-16T17:57:39,376 INFO [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-2 {event_type=RS_FLUSH_REGIONS, pid=68}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=42.49 KB at sequenceid=193 (bloomFilter=true), 
to=hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/9edd584a765d2a226d81ae3095fa4916/.tmp/B/798b1d134cbc4b8aa92704ae49cd3d3f 2024-12-16T17:57:39,395 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1734371848597.9edd584a765d2a226d81ae3095fa4916. as already flushing 2024-12-16T17:57:39,395 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39733 {}] regionserver.HRegion(8581): Flush requested on 9edd584a765d2a226d81ae3095fa4916 2024-12-16T17:57:39,397 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-2 {event_type=RS_FLUSH_REGIONS, pid=68}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/9edd584a765d2a226d81ae3095fa4916/.tmp/C/389f6513679c4c0ba89e791b25ceb8bf is 50, key is test_row_0/C:col10/1734371857278/Put/seqid=0 2024-12-16T17:57:39,401 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41817 is added to blk_1073742116_1292 (size=12151) 2024-12-16T17:57:39,402 INFO [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-2 {event_type=RS_FLUSH_REGIONS, pid=68}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=42.49 KB at sequenceid=193 (bloomFilter=true), to=hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/9edd584a765d2a226d81ae3095fa4916/.tmp/C/389f6513679c4c0ba89e791b25ceb8bf 2024-12-16T17:57:39,414 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-2 {event_type=RS_FLUSH_REGIONS, pid=68}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/9edd584a765d2a226d81ae3095fa4916/.tmp/A/6289fec02d764918a743798704c5d3e6 as hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/9edd584a765d2a226d81ae3095fa4916/A/6289fec02d764918a743798704c5d3e6 2024-12-16T17:57:39,417 INFO [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-2 {event_type=RS_FLUSH_REGIONS, pid=68}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/9edd584a765d2a226d81ae3095fa4916/A/6289fec02d764918a743798704c5d3e6, entries=150, sequenceid=193, filesize=11.9 K 2024-12-16T17:57:39,418 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-2 {event_type=RS_FLUSH_REGIONS, pid=68}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/9edd584a765d2a226d81ae3095fa4916/.tmp/B/798b1d134cbc4b8aa92704ae49cd3d3f as hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/9edd584a765d2a226d81ae3095fa4916/B/798b1d134cbc4b8aa92704ae49cd3d3f 2024-12-16T17:57:39,422 INFO [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-2 {event_type=RS_FLUSH_REGIONS, pid=68}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/9edd584a765d2a226d81ae3095fa4916/B/798b1d134cbc4b8aa92704ae49cd3d3f, entries=150, sequenceid=193, filesize=11.9 K 2024-12-16T17:57:39,423 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-2 
{event_type=RS_FLUSH_REGIONS, pid=68}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/9edd584a765d2a226d81ae3095fa4916/.tmp/C/389f6513679c4c0ba89e791b25ceb8bf as hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/9edd584a765d2a226d81ae3095fa4916/C/389f6513679c4c0ba89e791b25ceb8bf 2024-12-16T17:57:39,427 INFO [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-2 {event_type=RS_FLUSH_REGIONS, pid=68}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/9edd584a765d2a226d81ae3095fa4916/C/389f6513679c4c0ba89e791b25ceb8bf, entries=150, sequenceid=193, filesize=11.9 K 2024-12-16T17:57:39,436 INFO [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-2 {event_type=RS_FLUSH_REGIONS, pid=68}] regionserver.HRegion(3040): Finished flush of dataSize ~127.47 KB/130530, heapSize ~334.69 KB/342720, currentSize=13.42 KB/13740 for 9edd584a765d2a226d81ae3095fa4916 in 118ms, sequenceid=193, compaction requested=false 2024-12-16T17:57:39,436 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-2 {event_type=RS_FLUSH_REGIONS, pid=68}] regionserver.HRegion(2538): Flush status journal for 9edd584a765d2a226d81ae3095fa4916: 2024-12-16T17:57:39,436 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-2 {event_type=RS_FLUSH_REGIONS, pid=68}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1734371848597.9edd584a765d2a226d81ae3095fa4916. 2024-12-16T17:57:39,436 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-2 {event_type=RS_FLUSH_REGIONS, pid=68}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=68 2024-12-16T17:57:39,436 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38367 {}] master.HMaster(4106): Remote procedure done, pid=68 2024-12-16T17:57:39,438 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=68, resume processing ppid=67 2024-12-16T17:57:39,438 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=68, ppid=67, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 277 msec 2024-12-16T17:57:39,439 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=67, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=67, table=TestAcidGuarantees in 281 msec 2024-12-16T17:57:39,442 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39733 {}] regionserver.HRegion(8581): Flush requested on 9edd584a765d2a226d81ae3095fa4916 2024-12-16T17:57:39,442 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 9edd584a765d2a226d81ae3095fa4916 3/3 column families, dataSize=53.67 KB heapSize=141.38 KB 2024-12-16T17:57:39,443 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 9edd584a765d2a226d81ae3095fa4916, store=A 2024-12-16T17:57:39,443 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-16T17:57:39,443 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 9edd584a765d2a226d81ae3095fa4916, store=B 2024-12-16T17:57:39,443 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-16T17:57:39,443 DEBUG [MemStoreFlusher.0 
{}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 9edd584a765d2a226d81ae3095fa4916, store=C 2024-12-16T17:57:39,443 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-16T17:57:39,447 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/9edd584a765d2a226d81ae3095fa4916/.tmp/A/df12539b3ad0476c960a5df2afcc043e is 50, key is test_row_0/A:col10/1734371859441/Put/seqid=0 2024-12-16T17:57:39,451 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41817 is added to blk_1073742117_1293 (size=14541) 2024-12-16T17:57:39,462 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38367 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=67 2024-12-16T17:57:39,462 INFO [Thread-1171 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 67 completed 2024-12-16T17:57:39,462 DEBUG [RS:0;3609ad07831c:39733-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/9edd584a765d2a226d81ae3095fa4916/.tmp/C/5606ff8b0bff4b6ba9335f56cfb00bc3 as hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/9edd584a765d2a226d81ae3095fa4916/C/5606ff8b0bff4b6ba9335f56cfb00bc3 2024-12-16T17:57:39,463 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38367 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-12-16T17:57:39,464 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38367 {}] procedure2.ProcedureExecutor(1098): Stored pid=69, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=69, table=TestAcidGuarantees 2024-12-16T17:57:39,464 INFO [PEWorker-5 {}] procedure.FlushTableProcedure(91): pid=69, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=69, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-12-16T17:57:39,465 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38367 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=69 2024-12-16T17:57:39,465 INFO [PEWorker-5 {}] procedure.FlushTableProcedure(91): pid=69, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=69, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-12-16T17:57:39,465 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=70, ppid=69, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-12-16T17:57:39,468 INFO [RS:0;3609ad07831c:39733-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 4 (all) file(s) in 9edd584a765d2a226d81ae3095fa4916/C of 9edd584a765d2a226d81ae3095fa4916 into 5606ff8b0bff4b6ba9335f56cfb00bc3(size=12.2 K), total size for store is 24.1 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-12-16T17:57:39,468 DEBUG [RS:0;3609ad07831c:39733-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 9edd584a765d2a226d81ae3095fa4916: 2024-12-16T17:57:39,468 INFO [RS:0;3609ad07831c:39733-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1734371848597.9edd584a765d2a226d81ae3095fa4916., storeName=9edd584a765d2a226d81ae3095fa4916/C, priority=12, startTime=1734371858549; duration=0sec 2024-12-16T17:57:39,468 DEBUG [RS:0;3609ad07831c:39733-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-16T17:57:39,468 DEBUG [RS:0;3609ad07831c:39733-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 9edd584a765d2a226d81ae3095fa4916:C 2024-12-16T17:57:39,473 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39733 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9edd584a765d2a226d81ae3095fa4916, server=3609ad07831c,39733,1734371789085 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-16T17:57:39,473 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39733 {}] ipc.CallRunner(138): callId: 65 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45484 deadline: 1734371919472, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9edd584a765d2a226d81ae3095fa4916, server=3609ad07831c,39733,1734371789085 2024-12-16T17:57:39,485 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39733 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9edd584a765d2a226d81ae3095fa4916, server=3609ad07831c,39733,1734371789085 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-16T17:57:39,485 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39733 {}] ipc.CallRunner(138): callId: 82 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45454 deadline: 1734371919474, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9edd584a765d2a226d81ae3095fa4916, server=3609ad07831c,39733,1734371789085 2024-12-16T17:57:39,486 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39733 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9edd584a765d2a226d81ae3095fa4916, server=3609ad07831c,39733,1734371789085 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-16T17:57:39,486 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39733 {}] ipc.CallRunner(138): callId: 67 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45486 deadline: 1734371919483, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9edd584a765d2a226d81ae3095fa4916, server=3609ad07831c,39733,1734371789085 2024-12-16T17:57:39,565 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38367 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=69 2024-12-16T17:57:39,586 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39733 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9edd584a765d2a226d81ae3095fa4916, server=3609ad07831c,39733,1734371789085 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-16T17:57:39,586 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39733 {}] ipc.CallRunner(138): callId: 67 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45484 deadline: 1734371919584, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9edd584a765d2a226d81ae3095fa4916, server=3609ad07831c,39733,1734371789085 2024-12-16T17:57:39,589 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39733 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9edd584a765d2a226d81ae3095fa4916, server=3609ad07831c,39733,1734371789085 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-16T17:57:39,589 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39733 {}] ipc.CallRunner(138): callId: 84 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45454 deadline: 1734371919586, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9edd584a765d2a226d81ae3095fa4916, server=3609ad07831c,39733,1734371789085 2024-12-16T17:57:39,589 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39733 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9edd584a765d2a226d81ae3095fa4916, server=3609ad07831c,39733,1734371789085 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-16T17:57:39,589 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39733 {}] ipc.CallRunner(138): callId: 69 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45486 deadline: 1734371919586, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9edd584a765d2a226d81ae3095fa4916, server=3609ad07831c,39733,1734371789085 2024-12-16T17:57:39,617 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 3609ad07831c,39733,1734371789085 2024-12-16T17:57:39,618 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=39733 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=70 2024-12-16T17:57:39,618 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-0 {event_type=RS_FLUSH_REGIONS, pid=70}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1734371848597.9edd584a765d2a226d81ae3095fa4916. 
2024-12-16T17:57:39,618 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-0 {event_type=RS_FLUSH_REGIONS, pid=70}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1734371848597.9edd584a765d2a226d81ae3095fa4916. as already flushing 2024-12-16T17:57:39,618 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-0 {event_type=RS_FLUSH_REGIONS, pid=70}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1734371848597.9edd584a765d2a226d81ae3095fa4916. 2024-12-16T17:57:39,618 ERROR [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-0 {event_type=RS_FLUSH_REGIONS, pid=70}] handler.RSProcedureHandler(58): pid=70 java.io.IOException: Unable to complete flush {ENCODED => 9edd584a765d2a226d81ae3095fa4916, NAME => 'TestAcidGuarantees,,1734371848597.9edd584a765d2a226d81ae3095fa4916.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-16T17:57:39,618 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-0 {event_type=RS_FLUSH_REGIONS, pid=70}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=70 java.io.IOException: Unable to complete flush {ENCODED => 9edd584a765d2a226d81ae3095fa4916, NAME => 'TestAcidGuarantees,,1734371848597.9edd584a765d2a226d81ae3095fa4916.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-16T17:57:39,618 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38367 {}] master.HMaster(4114): Remote procedure failed, pid=70 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 9edd584a765d2a226d81ae3095fa4916, NAME => 'TestAcidGuarantees,,1734371848597.9edd584a765d2a226d81ae3095fa4916.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 9edd584a765d2a226d81ae3095fa4916, NAME => 'TestAcidGuarantees,,1734371848597.9edd584a765d2a226d81ae3095fa4916.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-16T17:57:39,766 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38367 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=69 2024-12-16T17:57:39,769 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 3609ad07831c,39733,1734371789085 2024-12-16T17:57:39,770 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=39733 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=70 2024-12-16T17:57:39,770 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-1 {event_type=RS_FLUSH_REGIONS, pid=70}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1734371848597.9edd584a765d2a226d81ae3095fa4916. 
2024-12-16T17:57:39,770 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-1 {event_type=RS_FLUSH_REGIONS, pid=70}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1734371848597.9edd584a765d2a226d81ae3095fa4916. as already flushing 2024-12-16T17:57:39,770 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-1 {event_type=RS_FLUSH_REGIONS, pid=70}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1734371848597.9edd584a765d2a226d81ae3095fa4916. 2024-12-16T17:57:39,770 ERROR [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-1 {event_type=RS_FLUSH_REGIONS, pid=70}] handler.RSProcedureHandler(58): pid=70 java.io.IOException: Unable to complete flush {ENCODED => 9edd584a765d2a226d81ae3095fa4916, NAME => 'TestAcidGuarantees,,1734371848597.9edd584a765d2a226d81ae3095fa4916.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-16T17:57:39,770 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-1 {event_type=RS_FLUSH_REGIONS, pid=70}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=70 java.io.IOException: Unable to complete flush {ENCODED => 9edd584a765d2a226d81ae3095fa4916, NAME => 'TestAcidGuarantees,,1734371848597.9edd584a765d2a226d81ae3095fa4916.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-16T17:57:39,771 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38367 {}] master.HMaster(4114): Remote procedure failed, pid=70 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 9edd584a765d2a226d81ae3095fa4916, NAME => 'TestAcidGuarantees,,1734371848597.9edd584a765d2a226d81ae3095fa4916.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 9edd584a765d2a226d81ae3095fa4916, NAME => 'TestAcidGuarantees,,1734371848597.9edd584a765d2a226d81ae3095fa4916.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-16T17:57:39,788 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39733 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9edd584a765d2a226d81ae3095fa4916, server=3609ad07831c,39733,1734371789085 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-16T17:57:39,789 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39733 {}] ipc.CallRunner(138): callId: 69 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45484 deadline: 1734371919788, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9edd584a765d2a226d81ae3095fa4916, server=3609ad07831c,39733,1734371789085 2024-12-16T17:57:39,790 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39733 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9edd584a765d2a226d81ae3095fa4916, server=3609ad07831c,39733,1734371789085 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-16T17:57:39,790 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39733 {}] ipc.CallRunner(138): callId: 86 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45454 deadline: 1734371919790, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9edd584a765d2a226d81ae3095fa4916, server=3609ad07831c,39733,1734371789085 2024-12-16T17:57:39,792 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39733 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9edd584a765d2a226d81ae3095fa4916, server=3609ad07831c,39733,1734371789085 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-16T17:57:39,792 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39733 {}] ipc.CallRunner(138): callId: 71 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45486 deadline: 1734371919791, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9edd584a765d2a226d81ae3095fa4916, server=3609ad07831c,39733,1734371789085 2024-12-16T17:57:39,852 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=17.89 KB at sequenceid=204 (bloomFilter=true), to=hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/9edd584a765d2a226d81ae3095fa4916/.tmp/A/df12539b3ad0476c960a5df2afcc043e 2024-12-16T17:57:39,859 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/9edd584a765d2a226d81ae3095fa4916/.tmp/B/a84e3508a9b941889c19de3f73369ebf is 50, key is test_row_0/B:col10/1734371859441/Put/seqid=0 2024-12-16T17:57:39,863 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41817 is added to blk_1073742118_1294 (size=12151) 2024-12-16T17:57:39,863 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=17.89 KB at sequenceid=204 (bloomFilter=true), to=hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/9edd584a765d2a226d81ae3095fa4916/.tmp/B/a84e3508a9b941889c19de3f73369ebf 2024-12-16T17:57:39,870 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/9edd584a765d2a226d81ae3095fa4916/.tmp/C/da4c7dc3266c4eb298b341abfcb7d10c is 50, key is test_row_0/C:col10/1734371859441/Put/seqid=0 2024-12-16T17:57:39,876 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41817 is added to blk_1073742119_1295 (size=12151) 2024-12-16T17:57:39,922 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 3609ad07831c,39733,1734371789085 2024-12-16T17:57:39,922 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=39733 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=70 2024-12-16T17:57:39,923 DEBUG 
[RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-2 {event_type=RS_FLUSH_REGIONS, pid=70}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1734371848597.9edd584a765d2a226d81ae3095fa4916. 2024-12-16T17:57:39,923 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-2 {event_type=RS_FLUSH_REGIONS, pid=70}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1734371848597.9edd584a765d2a226d81ae3095fa4916. as already flushing 2024-12-16T17:57:39,923 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-2 {event_type=RS_FLUSH_REGIONS, pid=70}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1734371848597.9edd584a765d2a226d81ae3095fa4916. 2024-12-16T17:57:39,923 ERROR [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-2 {event_type=RS_FLUSH_REGIONS, pid=70}] handler.RSProcedureHandler(58): pid=70 java.io.IOException: Unable to complete flush {ENCODED => 9edd584a765d2a226d81ae3095fa4916, NAME => 'TestAcidGuarantees,,1734371848597.9edd584a765d2a226d81ae3095fa4916.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-16T17:57:39,923 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-2 {event_type=RS_FLUSH_REGIONS, pid=70}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=70 java.io.IOException: Unable to complete flush {ENCODED => 9edd584a765d2a226d81ae3095fa4916, NAME => 'TestAcidGuarantees,,1734371848597.9edd584a765d2a226d81ae3095fa4916.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-16T17:57:39,923 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38367 {}] master.HMaster(4114): Remote procedure failed, pid=70 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 9edd584a765d2a226d81ae3095fa4916, NAME => 'TestAcidGuarantees,,1734371848597.9edd584a765d2a226d81ae3095fa4916.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 9edd584a765d2a226d81ae3095fa4916, NAME => 'TestAcidGuarantees,,1734371848597.9edd584a765d2a226d81ae3095fa4916.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-16T17:57:39,962 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39733 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9edd584a765d2a226d81ae3095fa4916, server=3609ad07831c,39733,1734371789085 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-16T17:57:39,962 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39733 {}] ipc.CallRunner(138): callId: 125 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45532 deadline: 1734371919961, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9edd584a765d2a226d81ae3095fa4916, server=3609ad07831c,39733,1734371789085 2024-12-16T17:57:39,963 DEBUG [Thread-1165 {}] client.RpcRetryingCallerImpl(129): Call exception, tries=6, retries=16, started=4147 ms ago, cancelled=false, msg=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9edd584a765d2a226d81ae3095fa4916, server=3609ad07831c,39733,1734371789085 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) , details=row 'test_row_1' on table 'TestAcidGuarantees' at region=TestAcidGuarantees,,1734371848597.9edd584a765d2a226d81ae3095fa4916., hostname=3609ad07831c,39733,1734371789085, seqNum=2, see https://s.apache.org/timeout, exception=org.apache.hadoop.hbase.RegionTooBusyException: org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9edd584a765d2a226d81ae3095fa4916, server=3609ad07831c,39733,1734371789085 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at jdk.internal.reflect.GeneratedConstructorAccessor40.newInstance(Unknown Source) at 
java.base/jdk.internal.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45) at java.base/java.lang.reflect.Constructor.newInstanceWithCaller(Constructor.java:499) at java.base/java.lang.reflect.Constructor.newInstance(Constructor.java:480) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.instantiateException(RemoteWithExtrasException.java:110) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.unwrapRemoteException(RemoteWithExtrasException.java:100) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.makeIOExceptionOfException(ProtobufUtil.java:280) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.handleRemoteException(ProtobufUtil.java:265) at org.apache.hadoop.hbase.client.RegionServerCallable.call(RegionServerCallable.java:133) at org.apache.hadoop.hbase.client.RpcRetryingCallerImpl.callWithRetries(RpcRetryingCallerImpl.java:104) at org.apache.hadoop.hbase.client.HTable.lambda$put$3(HTable.java:578) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.client.HTable.put(HTable.java:565) at org.apache.hadoop.hbase.AcidGuaranteesTestTool$AtomicityWriter.doAnAction(AcidGuaranteesTestTool.java:169) at org.apache.hadoop.hbase.MultithreadedTestUtil$RepeatingTestThread.doWork(MultithreadedTestUtil.java:149) at org.apache.hadoop.hbase.MultithreadedTestUtil$TestThread.run(MultithreadedTestUtil.java:123) Caused by: org.apache.hadoop.hbase.ipc.RemoteWithExtrasException(org.apache.hadoop.hbase.RegionTooBusyException): org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9edd584a765d2a226d81ae3095fa4916, server=3609ad07831c,39733,1734371789085 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.onCallFinished(AbstractRpcClient.java:392) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.access$100(AbstractRpcClient.java:94) at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:430) at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:425) at org.apache.hadoop.hbase.ipc.Call.callComplete(Call.java:116) at org.apache.hadoop.hbase.ipc.Call.setException(Call.java:131) at org.apache.hadoop.hbase.ipc.RpcConnection.readResponse(RpcConnection.java:457) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.readResponse(NettyRpcDuplexHandler.java:125) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.channelRead(NettyRpcDuplexHandler.java:140) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at 
org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.fireChannelRead(ByteToMessageDecoder.java:346) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.channelRead(ByteToMessageDecoder.java:318) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:444) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.timeout.IdleStateHandler.channelRead(IdleStateHandler.java:289) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline$HeadContext.channelRead(DefaultChannelPipeline.java:1357) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:440) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline.fireChannelRead(DefaultChannelPipeline.java:868) at org.apache.hbase.thirdparty.io.netty.channel.nio.AbstractNioByteChannel$NioByteUnsafe.read(AbstractNioByteChannel.java:166) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKey(NioEventLoop.java:788) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeysOptimized(NioEventLoop.java:724) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeys(NioEventLoop.java:650) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:562) at org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) at org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) at org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) at java.base/java.lang.Thread.run(Thread.java:840) 2024-12-16T17:57:39,967 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39733 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9edd584a765d2a226d81ae3095fa4916, server=3609ad07831c,39733,1734371789085 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-16T17:57:39,967 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39733 {}] ipc.CallRunner(138): callId: 52 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45446 deadline: 1734371919966, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9edd584a765d2a226d81ae3095fa4916, server=3609ad07831c,39733,1734371789085 2024-12-16T17:57:39,968 DEBUG [Thread-1167 {}] client.RpcRetryingCallerImpl(129): Call exception, tries=6, retries=16, started=4150 ms ago, cancelled=false, msg=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9edd584a765d2a226d81ae3095fa4916, server=3609ad07831c,39733,1734371789085 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) , details=row 'test_row_1' on table 'TestAcidGuarantees' at region=TestAcidGuarantees,,1734371848597.9edd584a765d2a226d81ae3095fa4916., hostname=3609ad07831c,39733,1734371789085, seqNum=2, see https://s.apache.org/timeout, exception=org.apache.hadoop.hbase.RegionTooBusyException: org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9edd584a765d2a226d81ae3095fa4916, server=3609ad07831c,39733,1734371789085 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at 
org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at jdk.internal.reflect.GeneratedConstructorAccessor40.newInstance(Unknown Source) at java.base/jdk.internal.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45) at java.base/java.lang.reflect.Constructor.newInstanceWithCaller(Constructor.java:499) at java.base/java.lang.reflect.Constructor.newInstance(Constructor.java:480) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.instantiateException(RemoteWithExtrasException.java:110) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.unwrapRemoteException(RemoteWithExtrasException.java:100) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.makeIOExceptionOfException(ProtobufUtil.java:280) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.handleRemoteException(ProtobufUtil.java:265) at org.apache.hadoop.hbase.client.RegionServerCallable.call(RegionServerCallable.java:133) at org.apache.hadoop.hbase.client.RpcRetryingCallerImpl.callWithRetries(RpcRetryingCallerImpl.java:104) at org.apache.hadoop.hbase.client.HTable.lambda$put$3(HTable.java:578) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.client.HTable.put(HTable.java:565) at org.apache.hadoop.hbase.AcidGuaranteesTestTool$AtomicityWriter.doAnAction(AcidGuaranteesTestTool.java:169) at org.apache.hadoop.hbase.MultithreadedTestUtil$RepeatingTestThread.doWork(MultithreadedTestUtil.java:149) at org.apache.hadoop.hbase.MultithreadedTestUtil$TestThread.run(MultithreadedTestUtil.java:123) Caused by: org.apache.hadoop.hbase.ipc.RemoteWithExtrasException(org.apache.hadoop.hbase.RegionTooBusyException): org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9edd584a765d2a226d81ae3095fa4916, server=3609ad07831c,39733,1734371789085 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.onCallFinished(AbstractRpcClient.java:392) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.access$100(AbstractRpcClient.java:94) at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:430) at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:425) at org.apache.hadoop.hbase.ipc.Call.callComplete(Call.java:116) at org.apache.hadoop.hbase.ipc.Call.setException(Call.java:131) at 
org.apache.hadoop.hbase.ipc.RpcConnection.readResponse(RpcConnection.java:457) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.readResponse(NettyRpcDuplexHandler.java:125) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.channelRead(NettyRpcDuplexHandler.java:140) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.fireChannelRead(ByteToMessageDecoder.java:346) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.channelRead(ByteToMessageDecoder.java:318) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:444) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.timeout.IdleStateHandler.channelRead(IdleStateHandler.java:289) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline$HeadContext.channelRead(DefaultChannelPipeline.java:1357) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:440) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline.fireChannelRead(DefaultChannelPipeline.java:868) at org.apache.hbase.thirdparty.io.netty.channel.nio.AbstractNioByteChannel$NioByteUnsafe.read(AbstractNioByteChannel.java:166) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKey(NioEventLoop.java:788) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeysOptimized(NioEventLoop.java:724) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeys(NioEventLoop.java:650) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:562) at org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) at org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) at org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) at java.base/java.lang.Thread.run(Thread.java:840) 2024-12-16T17:57:40,067 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38367 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=69 2024-12-16T17:57:40,075 DEBUG [RSProcedureDispatcher-pool-0 {}] 
master.ServerManager(801): New admin connection to 3609ad07831c,39733,1734371789085 2024-12-16T17:57:40,075 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=39733 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=70 2024-12-16T17:57:40,075 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-0 {event_type=RS_FLUSH_REGIONS, pid=70}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1734371848597.9edd584a765d2a226d81ae3095fa4916. 2024-12-16T17:57:40,075 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-0 {event_type=RS_FLUSH_REGIONS, pid=70}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1734371848597.9edd584a765d2a226d81ae3095fa4916. as already flushing 2024-12-16T17:57:40,075 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-0 {event_type=RS_FLUSH_REGIONS, pid=70}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1734371848597.9edd584a765d2a226d81ae3095fa4916. 2024-12-16T17:57:40,075 ERROR [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-0 {event_type=RS_FLUSH_REGIONS, pid=70}] handler.RSProcedureHandler(58): pid=70 java.io.IOException: Unable to complete flush {ENCODED => 9edd584a765d2a226d81ae3095fa4916, NAME => 'TestAcidGuarantees,,1734371848597.9edd584a765d2a226d81ae3095fa4916.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-16T17:57:40,076 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-0 {event_type=RS_FLUSH_REGIONS, pid=70}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=70 java.io.IOException: Unable to complete flush {ENCODED => 9edd584a765d2a226d81ae3095fa4916, NAME => 'TestAcidGuarantees,,1734371848597.9edd584a765d2a226d81ae3095fa4916.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
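The repeated RegionTooBusyException entries above ("Over memstore limit=512.0 K") are the region's write backpressure: once a region's memstore grows past the flush size times the blocking multiplier, puts are rejected until a flush frees space. A rough sketch of that arithmetic using the standard settings (defaults shown as fallbacks; the 512 K limit seen in this test run would be consistent with a deliberately shrunken flush size, which is an assumption, not something read from the log):

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;

public class MemstoreBlockingLimitSketch {
    public static void main(String[] args) {
        Configuration conf = HBaseConfiguration.create();
        // Per-region memstore flush threshold (default 128 MB) and the blocking
        // multiplier (default 4). Writes fail with RegionTooBusyException once the
        // memstore exceeds flushSize * multiplier, which is the "memstore limit"
        // value printed in the log entries above.
        long flushSize  = conf.getLong("hbase.hregion.memstore.flush.size", 128L * 1024 * 1024);
        long multiplier = conf.getLong("hbase.hregion.memstore.block.multiplier", 4L);
        long blockingLimit = flushSize * multiplier;
        System.out.printf("Puts block above ~%.1f KB of memstore data%n", blockingLimit / 1024.0);
    }
}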
2024-12-16T17:57:40,076 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38367 {}] master.HMaster(4114): Remote procedure failed, pid=70 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 9edd584a765d2a226d81ae3095fa4916, NAME => 'TestAcidGuarantees,,1734371848597.9edd584a765d2a226d81ae3095fa4916.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 9edd584a765d2a226d81ae3095fa4916, NAME => 'TestAcidGuarantees,,1734371848597.9edd584a765d2a226d81ae3095fa4916.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-16T17:57:40,091 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39733 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9edd584a765d2a226d81ae3095fa4916, server=3609ad07831c,39733,1734371789085 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-16T17:57:40,092 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39733 {}] ipc.CallRunner(138): callId: 71 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45484 deadline: 1734371920091, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9edd584a765d2a226d81ae3095fa4916, server=3609ad07831c,39733,1734371789085 2024-12-16T17:57:40,094 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39733 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9edd584a765d2a226d81ae3095fa4916, server=3609ad07831c,39733,1734371789085 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-16T17:57:40,094 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39733 {}] ipc.CallRunner(138): callId: 88 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45454 deadline: 1734371920093, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9edd584a765d2a226d81ae3095fa4916, server=3609ad07831c,39733,1734371789085 2024-12-16T17:57:40,096 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39733 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9edd584a765d2a226d81ae3095fa4916, server=3609ad07831c,39733,1734371789085 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-16T17:57:40,096 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39733 {}] ipc.CallRunner(138): callId: 73 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45486 deadline: 1734371920095, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9edd584a765d2a226d81ae3095fa4916, server=3609ad07831c,39733,1734371789085 2024-12-16T17:57:40,227 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 3609ad07831c,39733,1734371789085 2024-12-16T17:57:40,228 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=39733 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=70 2024-12-16T17:57:40,228 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-1 {event_type=RS_FLUSH_REGIONS, pid=70}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1734371848597.9edd584a765d2a226d81ae3095fa4916. 2024-12-16T17:57:40,228 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-1 {event_type=RS_FLUSH_REGIONS, pid=70}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1734371848597.9edd584a765d2a226d81ae3095fa4916. as already flushing 2024-12-16T17:57:40,228 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-1 {event_type=RS_FLUSH_REGIONS, pid=70}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1734371848597.9edd584a765d2a226d81ae3095fa4916. 2024-12-16T17:57:40,228 ERROR [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-1 {event_type=RS_FLUSH_REGIONS, pid=70}] handler.RSProcedureHandler(58): pid=70 java.io.IOException: Unable to complete flush {ENCODED => 9edd584a765d2a226d81ae3095fa4916, NAME => 'TestAcidGuarantees,,1734371848597.9edd584a765d2a226d81ae3095fa4916.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] 
at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-16T17:57:40,228 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-1 {event_type=RS_FLUSH_REGIONS, pid=70}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=70 java.io.IOException: Unable to complete flush {ENCODED => 9edd584a765d2a226d81ae3095fa4916, NAME => 'TestAcidGuarantees,,1734371848597.9edd584a765d2a226d81ae3095fa4916.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-16T17:57:40,229 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38367 {}] master.HMaster(4114): Remote procedure failed, pid=70 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 9edd584a765d2a226d81ae3095fa4916, NAME => 'TestAcidGuarantees,,1734371848597.9edd584a765d2a226d81ae3095fa4916.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 9edd584a765d2a226d81ae3095fa4916, NAME => 'TestAcidGuarantees,,1734371848597.9edd584a765d2a226d81ae3095fa4916.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-16T17:57:40,277 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=17.89 KB at sequenceid=204 (bloomFilter=true), to=hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/9edd584a765d2a226d81ae3095fa4916/.tmp/C/da4c7dc3266c4eb298b341abfcb7d10c 2024-12-16T17:57:40,281 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/9edd584a765d2a226d81ae3095fa4916/.tmp/A/df12539b3ad0476c960a5df2afcc043e as hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/9edd584a765d2a226d81ae3095fa4916/A/df12539b3ad0476c960a5df2afcc043e 2024-12-16T17:57:40,284 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/9edd584a765d2a226d81ae3095fa4916/A/df12539b3ad0476c960a5df2afcc043e, entries=200, sequenceid=204, filesize=14.2 K 2024-12-16T17:57:40,285 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/9edd584a765d2a226d81ae3095fa4916/.tmp/B/a84e3508a9b941889c19de3f73369ebf as hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/9edd584a765d2a226d81ae3095fa4916/B/a84e3508a9b941889c19de3f73369ebf 2024-12-16T17:57:40,289 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/9edd584a765d2a226d81ae3095fa4916/B/a84e3508a9b941889c19de3f73369ebf, entries=150, sequenceid=204, filesize=11.9 K 2024-12-16T17:57:40,289 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/9edd584a765d2a226d81ae3095fa4916/.tmp/C/da4c7dc3266c4eb298b341abfcb7d10c as hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/9edd584a765d2a226d81ae3095fa4916/C/da4c7dc3266c4eb298b341abfcb7d10c 2024-12-16T17:57:40,293 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/9edd584a765d2a226d81ae3095fa4916/C/da4c7dc3266c4eb298b341abfcb7d10c, entries=150, sequenceid=204, filesize=11.9 K 2024-12-16T17:57:40,294 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished 
flush of dataSize ~53.67 KB/54960, heapSize ~141.33 KB/144720, currentSize=154.31 KB/158010 for 9edd584a765d2a226d81ae3095fa4916 in 852ms, sequenceid=204, compaction requested=true 2024-12-16T17:57:40,294 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 9edd584a765d2a226d81ae3095fa4916: 2024-12-16T17:57:40,294 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 9edd584a765d2a226d81ae3095fa4916:A, priority=-2147483648, current under compaction store size is 1 2024-12-16T17:57:40,294 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-16T17:57:40,294 DEBUG [RS:0;3609ad07831c:39733-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-16T17:57:40,294 DEBUG [RS:0;3609ad07831c:39733-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-16T17:57:40,294 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 9edd584a765d2a226d81ae3095fa4916:B, priority=-2147483648, current under compaction store size is 2 2024-12-16T17:57:40,294 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-16T17:57:40,294 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 9edd584a765d2a226d81ae3095fa4916:C, priority=-2147483648, current under compaction store size is 3 2024-12-16T17:57:40,294 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-16T17:57:40,295 DEBUG [RS:0;3609ad07831c:39733-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 36829 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-16T17:57:40,295 DEBUG [RS:0;3609ad07831c:39733-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 39219 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-16T17:57:40,295 DEBUG [RS:0;3609ad07831c:39733-shortCompactions-0 {}] regionserver.HStore(1540): 9edd584a765d2a226d81ae3095fa4916/A is initiating minor compaction (all files) 2024-12-16T17:57:40,295 DEBUG [RS:0;3609ad07831c:39733-longCompactions-0 {}] regionserver.HStore(1540): 9edd584a765d2a226d81ae3095fa4916/B is initiating minor compaction (all files) 2024-12-16T17:57:40,295 INFO [RS:0;3609ad07831c:39733-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 9edd584a765d2a226d81ae3095fa4916/B in TestAcidGuarantees,,1734371848597.9edd584a765d2a226d81ae3095fa4916. 2024-12-16T17:57:40,295 INFO [RS:0;3609ad07831c:39733-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 9edd584a765d2a226d81ae3095fa4916/A in TestAcidGuarantees,,1734371848597.9edd584a765d2a226d81ae3095fa4916. 
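The "Selecting compaction from 3 store files" / "Exploring compaction algorithm has selected 3 files" entries above show the size-based minor compaction selection picking up the three flushed HFiles per store. Below is a simplified, self-contained sketch of the ratio rule that underlies this kind of selection (a candidate stays in the set if it is no larger than the sum of the other candidates times the compaction ratio); it is not the real ExploringCompactionPolicy, the 1.2 ratio is the usual default for hbase.hstore.compaction.ratio, and the byte sizes only approximate the three A-family files from the log (12.2 K, 11.9 K, 14.2 K):

import java.util.ArrayList;
import java.util.List;

public class RatioCompactionSketch {
    // Simplified version of the ratio check behind HBase's size-based minor
    // compaction selection: drop any file larger than (sum of the others) * ratio,
    // repeat until the remaining set is stable.
    static List<Long> selectForCompaction(List<Long> fileSizes, double ratio) {
        List<Long> selected = new ArrayList<>(fileSizes);
        boolean changed = true;
        while (changed && selected.size() > 1) {
            changed = false;
            for (int i = 0; i < selected.size(); i++) {
                long others = 0;
                for (int j = 0; j < selected.size(); j++) {
                    if (j != i) others += selected.get(j);
                }
                if (selected.get(i) > others * ratio) {
                    selected.remove(i);   // too large relative to the rest; leave it out
                    changed = true;
                    break;
                }
            }
        }
        return selected;
    }

    public static void main(String[] args) {
        // Approximate sizes of the three A-family files being compacted in the log.
        List<Long> files = new ArrayList<>(List.of(12_493L, 12_187L, 14_539L));
        // With ratio 1.2 all three files pass the check, matching "selected 3 files".
        System.out.println("Selected for minor compaction: " + selectForCompaction(files, 1.2));
    }
}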
2024-12-16T17:57:40,295 INFO [RS:0;3609ad07831c:39733-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/9edd584a765d2a226d81ae3095fa4916/A/5147d5cddff041fab1c52700c0422ef0, hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/9edd584a765d2a226d81ae3095fa4916/A/6289fec02d764918a743798704c5d3e6, hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/9edd584a765d2a226d81ae3095fa4916/A/df12539b3ad0476c960a5df2afcc043e] into tmpdir=hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/9edd584a765d2a226d81ae3095fa4916/.tmp, totalSize=38.3 K 2024-12-16T17:57:40,295 INFO [RS:0;3609ad07831c:39733-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/9edd584a765d2a226d81ae3095fa4916/B/83a748fc859349c3a759c32466de4793, hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/9edd584a765d2a226d81ae3095fa4916/B/798b1d134cbc4b8aa92704ae49cd3d3f, hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/9edd584a765d2a226d81ae3095fa4916/B/a84e3508a9b941889c19de3f73369ebf] into tmpdir=hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/9edd584a765d2a226d81ae3095fa4916/.tmp, totalSize=36.0 K 2024-12-16T17:57:40,295 DEBUG [RS:0;3609ad07831c:39733-shortCompactions-0 {}] compactions.Compactor(224): Compacting 5147d5cddff041fab1c52700c0422ef0, keycount=150, bloomtype=ROW, size=12.2 K, encoding=NONE, compression=NONE, seqNum=169, earliestPutTs=1734371855816 2024-12-16T17:57:40,295 DEBUG [RS:0;3609ad07831c:39733-longCompactions-0 {}] compactions.Compactor(224): Compacting 83a748fc859349c3a759c32466de4793, keycount=150, bloomtype=ROW, size=12.2 K, encoding=NONE, compression=NONE, seqNum=169, earliestPutTs=1734371855816 2024-12-16T17:57:40,296 DEBUG [RS:0;3609ad07831c:39733-longCompactions-0 {}] compactions.Compactor(224): Compacting 798b1d134cbc4b8aa92704ae49cd3d3f, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=193, earliestPutTs=1734371857271 2024-12-16T17:57:40,296 DEBUG [RS:0;3609ad07831c:39733-shortCompactions-0 {}] compactions.Compactor(224): Compacting 6289fec02d764918a743798704c5d3e6, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=193, earliestPutTs=1734371857271 2024-12-16T17:57:40,296 DEBUG [RS:0;3609ad07831c:39733-longCompactions-0 {}] compactions.Compactor(224): Compacting a84e3508a9b941889c19de3f73369ebf, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=204, earliestPutTs=1734371859415 2024-12-16T17:57:40,296 DEBUG [RS:0;3609ad07831c:39733-shortCompactions-0 {}] compactions.Compactor(224): Compacting df12539b3ad0476c960a5df2afcc043e, keycount=200, bloomtype=ROW, size=14.2 K, encoding=NONE, compression=NONE, seqNum=204, earliestPutTs=1734371859415 2024-12-16T17:57:40,304 INFO [RS:0;3609ad07831c:39733-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 9edd584a765d2a226d81ae3095fa4916#B#compaction#250 average throughput is 6.55 MB/second, slept 0 time(s) and 
total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-12-16T17:57:40,304 INFO [RS:0;3609ad07831c:39733-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 9edd584a765d2a226d81ae3095fa4916#A#compaction#249 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-12-16T17:57:40,304 DEBUG [RS:0;3609ad07831c:39733-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/9edd584a765d2a226d81ae3095fa4916/.tmp/B/ce3a0ccaa8f44d8c8e9fcff7dd8e0353 is 50, key is test_row_0/B:col10/1734371859441/Put/seqid=0 2024-12-16T17:57:40,304 DEBUG [RS:0;3609ad07831c:39733-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/9edd584a765d2a226d81ae3095fa4916/.tmp/A/edc8531fd19841ce88fbd9d9d84bffed is 50, key is test_row_0/A:col10/1734371859441/Put/seqid=0 2024-12-16T17:57:40,311 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41817 is added to blk_1073742120_1296 (size=12629) 2024-12-16T17:57:40,313 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41817 is added to blk_1073742121_1297 (size=12629) 2024-12-16T17:57:40,380 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 3609ad07831c,39733,1734371789085 2024-12-16T17:57:40,380 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=39733 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=70 2024-12-16T17:57:40,380 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-2 {event_type=RS_FLUSH_REGIONS, pid=70}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1734371848597.9edd584a765d2a226d81ae3095fa4916. 
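The writer threads whose stack traces recur throughout this log (AcidGuaranteesTestTool$AtomicityWriter, rows test_row_0/test_row_1, families A/B/C) exercise HBase's single-row atomicity: one Put carrying cells for several column families becomes visible in full or not at all, even while flushes and compactions like the ones above are running. A hedged sketch of that write pattern, with the retry budget the RpcRetryingCallerImpl entries report (retries=16) made explicit; the qualifier and payload are illustrative, not taken from the test:

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.util.Bytes;

public class AtomicMultiFamilyPutSketch {
    public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();
        // Matches the retry budget visible in the log ("tries=6, retries=16, ...");
        // RegionTooBusyException is retried internally with backoff until this runs out.
        conf.setInt("hbase.client.retries.number", 16);
        conf.setLong("hbase.client.pause", 100L); // base backoff between retries, in ms

        try (Connection conn = ConnectionFactory.createConnection(conf);
             Table table = conn.getTable(TableName.valueOf("TestAcidGuarantees"))) {
            byte[] value = Bytes.toBytes("some-value"); // illustrative payload
            Put put = new Put(Bytes.toBytes("test_row_1"));
            // One Put spanning all three families; HBase applies it atomically per row,
            // which is the property the acid-guarantees reader threads verify.
            put.addColumn(Bytes.toBytes("A"), Bytes.toBytes("col10"), value);
            put.addColumn(Bytes.toBytes("B"), Bytes.toBytes("col10"), value);
            put.addColumn(Bytes.toBytes("C"), Bytes.toBytes("col10"), value);
            table.put(put);
        }
    }
}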
2024-12-16T17:57:40,380 INFO [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-2 {event_type=RS_FLUSH_REGIONS, pid=70}] regionserver.HRegion(2837): Flushing 9edd584a765d2a226d81ae3095fa4916 3/3 column families, dataSize=154.31 KB heapSize=405.05 KB 2024-12-16T17:57:40,381 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-2 {event_type=RS_FLUSH_REGIONS, pid=70}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 9edd584a765d2a226d81ae3095fa4916, store=A 2024-12-16T17:57:40,381 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-2 {event_type=RS_FLUSH_REGIONS, pid=70}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-16T17:57:40,381 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-2 {event_type=RS_FLUSH_REGIONS, pid=70}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 9edd584a765d2a226d81ae3095fa4916, store=B 2024-12-16T17:57:40,381 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-2 {event_type=RS_FLUSH_REGIONS, pid=70}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-16T17:57:40,381 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-2 {event_type=RS_FLUSH_REGIONS, pid=70}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 9edd584a765d2a226d81ae3095fa4916, store=C 2024-12-16T17:57:40,381 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-2 {event_type=RS_FLUSH_REGIONS, pid=70}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-16T17:57:40,385 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-2 {event_type=RS_FLUSH_REGIONS, pid=70}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/9edd584a765d2a226d81ae3095fa4916/.tmp/A/9cb89fd772694309b6e1711fbeca756f is 50, key is test_row_0/A:col10/1734371859466/Put/seqid=0 2024-12-16T17:57:40,390 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41817 is added to blk_1073742122_1298 (size=12151) 2024-12-16T17:57:40,567 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38367 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=69 2024-12-16T17:57:40,596 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39733 {}] regionserver.HRegion(8581): Flush requested on 9edd584a765d2a226d81ae3095fa4916 2024-12-16T17:57:40,596 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1734371848597.9edd584a765d2a226d81ae3095fa4916. as already flushing 2024-12-16T17:57:40,606 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39733 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9edd584a765d2a226d81ae3095fa4916, server=3609ad07831c,39733,1734371789085 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-16T17:57:40,606 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39733 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9edd584a765d2a226d81ae3095fa4916, server=3609ad07831c,39733,1734371789085 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-16T17:57:40,606 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39733 {}] ipc.CallRunner(138): callId: 76 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45484 deadline: 1734371920603, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9edd584a765d2a226d81ae3095fa4916, server=3609ad07831c,39733,1734371789085 2024-12-16T17:57:40,606 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39733 {}] ipc.CallRunner(138): callId: 93 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45454 deadline: 1734371920603, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9edd584a765d2a226d81ae3095fa4916, server=3609ad07831c,39733,1734371789085 2024-12-16T17:57:40,606 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39733 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9edd584a765d2a226d81ae3095fa4916, server=3609ad07831c,39733,1734371789085 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-16T17:57:40,606 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39733 {}] ipc.CallRunner(138): callId: 76 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45486 deadline: 1734371920604, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9edd584a765d2a226d81ae3095fa4916, server=3609ad07831c,39733,1734371789085 2024-12-16T17:57:40,708 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39733 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9edd584a765d2a226d81ae3095fa4916, server=3609ad07831c,39733,1734371789085 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-16T17:57:40,708 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39733 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9edd584a765d2a226d81ae3095fa4916, server=3609ad07831c,39733,1734371789085 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-16T17:57:40,708 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39733 {}] ipc.CallRunner(138): callId: 78 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45486 deadline: 1734371920707, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9edd584a765d2a226d81ae3095fa4916, server=3609ad07831c,39733,1734371789085 2024-12-16T17:57:40,708 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39733 {}] ipc.CallRunner(138): callId: 78 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45484 deadline: 1734371920707, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9edd584a765d2a226d81ae3095fa4916, server=3609ad07831c,39733,1734371789085 2024-12-16T17:57:40,708 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39733 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9edd584a765d2a226d81ae3095fa4916, server=3609ad07831c,39733,1734371789085 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-16T17:57:40,709 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39733 {}] ipc.CallRunner(138): callId: 95 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45454 deadline: 1734371920707, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9edd584a765d2a226d81ae3095fa4916, server=3609ad07831c,39733,1734371789085 2024-12-16T17:57:40,724 DEBUG [RS:0;3609ad07831c:39733-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/9edd584a765d2a226d81ae3095fa4916/.tmp/B/ce3a0ccaa8f44d8c8e9fcff7dd8e0353 as hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/9edd584a765d2a226d81ae3095fa4916/B/ce3a0ccaa8f44d8c8e9fcff7dd8e0353 2024-12-16T17:57:40,725 DEBUG [RS:0;3609ad07831c:39733-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/9edd584a765d2a226d81ae3095fa4916/.tmp/A/edc8531fd19841ce88fbd9d9d84bffed as hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/9edd584a765d2a226d81ae3095fa4916/A/edc8531fd19841ce88fbd9d9d84bffed 2024-12-16T17:57:40,733 INFO [RS:0;3609ad07831c:39733-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 9edd584a765d2a226d81ae3095fa4916/A of 9edd584a765d2a226d81ae3095fa4916 into edc8531fd19841ce88fbd9d9d84bffed(size=12.3 K), total size for store is 12.3 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-12-16T17:57:40,733 INFO [RS:0;3609ad07831c:39733-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 9edd584a765d2a226d81ae3095fa4916/B of 9edd584a765d2a226d81ae3095fa4916 into ce3a0ccaa8f44d8c8e9fcff7dd8e0353(size=12.3 K), total size for store is 12.3 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-12-16T17:57:40,733 DEBUG [RS:0;3609ad07831c:39733-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 9edd584a765d2a226d81ae3095fa4916: 2024-12-16T17:57:40,733 DEBUG [RS:0;3609ad07831c:39733-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 9edd584a765d2a226d81ae3095fa4916: 2024-12-16T17:57:40,734 INFO [RS:0;3609ad07831c:39733-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1734371848597.9edd584a765d2a226d81ae3095fa4916., storeName=9edd584a765d2a226d81ae3095fa4916/B, priority=13, startTime=1734371860294; duration=0sec 2024-12-16T17:57:40,734 INFO [RS:0;3609ad07831c:39733-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1734371848597.9edd584a765d2a226d81ae3095fa4916., storeName=9edd584a765d2a226d81ae3095fa4916/A, priority=13, startTime=1734371860294; duration=0sec 2024-12-16T17:57:40,734 DEBUG [RS:0;3609ad07831c:39733-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-16T17:57:40,734 DEBUG [RS:0;3609ad07831c:39733-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-16T17:57:40,734 DEBUG [RS:0;3609ad07831c:39733-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 9edd584a765d2a226d81ae3095fa4916:A 2024-12-16T17:57:40,734 DEBUG [RS:0;3609ad07831c:39733-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 9edd584a765d2a226d81ae3095fa4916:B 2024-12-16T17:57:40,734 DEBUG [RS:0;3609ad07831c:39733-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-16T17:57:40,741 DEBUG [RS:0;3609ad07831c:39733-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 36829 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-16T17:57:40,741 DEBUG [RS:0;3609ad07831c:39733-shortCompactions-0 {}] regionserver.HStore(1540): 9edd584a765d2a226d81ae3095fa4916/C is initiating minor compaction (all files) 2024-12-16T17:57:40,742 INFO [RS:0;3609ad07831c:39733-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 9edd584a765d2a226d81ae3095fa4916/C in TestAcidGuarantees,,1734371848597.9edd584a765d2a226d81ae3095fa4916. 
2024-12-16T17:57:40,742 INFO [RS:0;3609ad07831c:39733-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/9edd584a765d2a226d81ae3095fa4916/C/5606ff8b0bff4b6ba9335f56cfb00bc3, hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/9edd584a765d2a226d81ae3095fa4916/C/389f6513679c4c0ba89e791b25ceb8bf, hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/9edd584a765d2a226d81ae3095fa4916/C/da4c7dc3266c4eb298b341abfcb7d10c] into tmpdir=hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/9edd584a765d2a226d81ae3095fa4916/.tmp, totalSize=36.0 K 2024-12-16T17:57:40,742 DEBUG [RS:0;3609ad07831c:39733-shortCompactions-0 {}] compactions.Compactor(224): Compacting 5606ff8b0bff4b6ba9335f56cfb00bc3, keycount=150, bloomtype=ROW, size=12.2 K, encoding=NONE, compression=NONE, seqNum=169, earliestPutTs=1734371855816 2024-12-16T17:57:40,742 DEBUG [RS:0;3609ad07831c:39733-shortCompactions-0 {}] compactions.Compactor(224): Compacting 389f6513679c4c0ba89e791b25ceb8bf, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=193, earliestPutTs=1734371857271 2024-12-16T17:57:40,743 DEBUG [RS:0;3609ad07831c:39733-shortCompactions-0 {}] compactions.Compactor(224): Compacting da4c7dc3266c4eb298b341abfcb7d10c, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=204, earliestPutTs=1734371859415 2024-12-16T17:57:40,758 INFO [RS:0;3609ad07831c:39733-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 9edd584a765d2a226d81ae3095fa4916#C#compaction#252 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-12-16T17:57:40,758 DEBUG [RS:0;3609ad07831c:39733-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/9edd584a765d2a226d81ae3095fa4916/.tmp/C/595f76a63fda4ee3940e2a21d1fcc3e0 is 50, key is test_row_0/C:col10/1734371859441/Put/seqid=0 2024-12-16T17:57:40,774 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41817 is added to blk_1073742123_1299 (size=12629) 2024-12-16T17:57:40,780 DEBUG [RS:0;3609ad07831c:39733-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/9edd584a765d2a226d81ae3095fa4916/.tmp/C/595f76a63fda4ee3940e2a21d1fcc3e0 as hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/9edd584a765d2a226d81ae3095fa4916/C/595f76a63fda4ee3940e2a21d1fcc3e0 2024-12-16T17:57:40,786 INFO [RS:0;3609ad07831c:39733-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 9edd584a765d2a226d81ae3095fa4916/C of 9edd584a765d2a226d81ae3095fa4916 into 595f76a63fda4ee3940e2a21d1fcc3e0(size=12.3 K), total size for store is 12.3 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-12-16T17:57:40,786 DEBUG [RS:0;3609ad07831c:39733-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 9edd584a765d2a226d81ae3095fa4916: 2024-12-16T17:57:40,786 INFO [RS:0;3609ad07831c:39733-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1734371848597.9edd584a765d2a226d81ae3095fa4916., storeName=9edd584a765d2a226d81ae3095fa4916/C, priority=13, startTime=1734371860294; duration=0sec 2024-12-16T17:57:40,786 DEBUG [RS:0;3609ad07831c:39733-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-16T17:57:40,786 DEBUG [RS:0;3609ad07831c:39733-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 9edd584a765d2a226d81ae3095fa4916:C 2024-12-16T17:57:40,791 INFO [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-2 {event_type=RS_FLUSH_REGIONS, pid=70}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=51.44 KB at sequenceid=231 (bloomFilter=true), to=hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/9edd584a765d2a226d81ae3095fa4916/.tmp/A/9cb89fd772694309b6e1711fbeca756f 2024-12-16T17:57:40,799 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-2 {event_type=RS_FLUSH_REGIONS, pid=70}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/9edd584a765d2a226d81ae3095fa4916/.tmp/B/ee0463a44e2d43c09e26a508f369adc3 is 50, key is test_row_0/B:col10/1734371859466/Put/seqid=0 2024-12-16T17:57:40,808 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41817 is added to blk_1073742124_1300 (size=12151) 2024-12-16T17:57:40,809 INFO [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-2 {event_type=RS_FLUSH_REGIONS, pid=70}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=51.44 KB at sequenceid=231 (bloomFilter=true), to=hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/9edd584a765d2a226d81ae3095fa4916/.tmp/B/ee0463a44e2d43c09e26a508f369adc3 2024-12-16T17:57:40,816 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-2 {event_type=RS_FLUSH_REGIONS, pid=70}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/9edd584a765d2a226d81ae3095fa4916/.tmp/C/9672b275fd17443a9333307601bcf329 is 50, key is test_row_0/C:col10/1734371859466/Put/seqid=0 2024-12-16T17:57:40,819 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41817 is added to blk_1073742125_1301 (size=12151) 2024-12-16T17:57:40,820 INFO [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-2 {event_type=RS_FLUSH_REGIONS, pid=70}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=51.44 KB at sequenceid=231 (bloomFilter=true), to=hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/9edd584a765d2a226d81ae3095fa4916/.tmp/C/9672b275fd17443a9333307601bcf329 2024-12-16T17:57:40,826 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-2 {event_type=RS_FLUSH_REGIONS, pid=70}] regionserver.HRegionFileSystem(442): Committing 
hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/9edd584a765d2a226d81ae3095fa4916/.tmp/A/9cb89fd772694309b6e1711fbeca756f as hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/9edd584a765d2a226d81ae3095fa4916/A/9cb89fd772694309b6e1711fbeca756f 2024-12-16T17:57:40,831 INFO [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-2 {event_type=RS_FLUSH_REGIONS, pid=70}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/9edd584a765d2a226d81ae3095fa4916/A/9cb89fd772694309b6e1711fbeca756f, entries=150, sequenceid=231, filesize=11.9 K 2024-12-16T17:57:40,833 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-2 {event_type=RS_FLUSH_REGIONS, pid=70}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/9edd584a765d2a226d81ae3095fa4916/.tmp/B/ee0463a44e2d43c09e26a508f369adc3 as hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/9edd584a765d2a226d81ae3095fa4916/B/ee0463a44e2d43c09e26a508f369adc3 2024-12-16T17:57:40,837 INFO [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-2 {event_type=RS_FLUSH_REGIONS, pid=70}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/9edd584a765d2a226d81ae3095fa4916/B/ee0463a44e2d43c09e26a508f369adc3, entries=150, sequenceid=231, filesize=11.9 K 2024-12-16T17:57:40,837 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-2 {event_type=RS_FLUSH_REGIONS, pid=70}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/9edd584a765d2a226d81ae3095fa4916/.tmp/C/9672b275fd17443a9333307601bcf329 as hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/9edd584a765d2a226d81ae3095fa4916/C/9672b275fd17443a9333307601bcf329 2024-12-16T17:57:40,843 INFO [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-2 {event_type=RS_FLUSH_REGIONS, pid=70}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/9edd584a765d2a226d81ae3095fa4916/C/9672b275fd17443a9333307601bcf329, entries=150, sequenceid=231, filesize=11.9 K 2024-12-16T17:57:40,844 INFO [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-2 {event_type=RS_FLUSH_REGIONS, pid=70}] regionserver.HRegion(3040): Finished flush of dataSize ~154.31 KB/158010, heapSize ~405 KB/414720, currentSize=46.96 KB/48090 for 9edd584a765d2a226d81ae3095fa4916 in 464ms, sequenceid=231, compaction requested=false 2024-12-16T17:57:40,844 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-2 {event_type=RS_FLUSH_REGIONS, pid=70}] regionserver.HRegion(2538): Flush status journal for 9edd584a765d2a226d81ae3095fa4916: 2024-12-16T17:57:40,844 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-2 {event_type=RS_FLUSH_REGIONS, pid=70}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1734371848597.9edd584a765d2a226d81ae3095fa4916. 
2024-12-16T17:57:40,844 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-2 {event_type=RS_FLUSH_REGIONS, pid=70}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=70 2024-12-16T17:57:40,844 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38367 {}] master.HMaster(4106): Remote procedure done, pid=70 2024-12-16T17:57:40,846 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=70, resume processing ppid=69 2024-12-16T17:57:40,846 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=70, ppid=69, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 1.3800 sec 2024-12-16T17:57:40,848 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=69, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=69, table=TestAcidGuarantees in 1.3840 sec 2024-12-16T17:57:40,911 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39733 {}] regionserver.HRegion(8581): Flush requested on 9edd584a765d2a226d81ae3095fa4916 2024-12-16T17:57:40,911 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 9edd584a765d2a226d81ae3095fa4916 3/3 column families, dataSize=53.67 KB heapSize=141.38 KB 2024-12-16T17:57:40,911 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 9edd584a765d2a226d81ae3095fa4916, store=A 2024-12-16T17:57:40,912 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-16T17:57:40,912 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 9edd584a765d2a226d81ae3095fa4916, store=B 2024-12-16T17:57:40,912 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-16T17:57:40,912 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 9edd584a765d2a226d81ae3095fa4916, store=C 2024-12-16T17:57:40,912 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-16T17:57:40,919 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/9edd584a765d2a226d81ae3095fa4916/.tmp/A/b11d7a7e1143462aa782022df054b4c8 is 50, key is test_row_0/A:col10/1734371860603/Put/seqid=0 2024-12-16T17:57:40,946 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39733 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9edd584a765d2a226d81ae3095fa4916, server=3609ad07831c,39733,1734371789085 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-16T17:57:40,946 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39733 {}] ipc.CallRunner(138): callId: 89 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45484 deadline: 1734371920943, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9edd584a765d2a226d81ae3095fa4916, server=3609ad07831c,39733,1734371789085 2024-12-16T17:57:40,948 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39733 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9edd584a765d2a226d81ae3095fa4916, server=3609ad07831c,39733,1734371789085 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-16T17:57:40,948 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39733 {}] ipc.CallRunner(138): callId: 104 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45454 deadline: 1734371920946, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9edd584a765d2a226d81ae3095fa4916, server=3609ad07831c,39733,1734371789085 2024-12-16T17:57:40,949 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39733 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9edd584a765d2a226d81ae3095fa4916, server=3609ad07831c,39733,1734371789085 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-16T17:57:40,949 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39733 {}] ipc.CallRunner(138): callId: 88 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45486 deadline: 1734371920946, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9edd584a765d2a226d81ae3095fa4916, server=3609ad07831c,39733,1734371789085 2024-12-16T17:57:40,963 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41817 is added to blk_1073742126_1302 (size=12151) 2024-12-16T17:57:40,964 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=17.89 KB at sequenceid=245 (bloomFilter=true), to=hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/9edd584a765d2a226d81ae3095fa4916/.tmp/A/b11d7a7e1143462aa782022df054b4c8 2024-12-16T17:57:40,978 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/9edd584a765d2a226d81ae3095fa4916/.tmp/B/9553250f5ffd4b1a828dee0e8e6842b7 is 50, key is test_row_0/B:col10/1734371860603/Put/seqid=0 2024-12-16T17:57:41,005 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41817 is added to blk_1073742127_1303 (size=12151) 2024-12-16T17:57:41,007 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=17.89 KB at sequenceid=245 (bloomFilter=true), to=hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/9edd584a765d2a226d81ae3095fa4916/.tmp/B/9553250f5ffd4b1a828dee0e8e6842b7 2024-12-16T17:57:41,016 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/9edd584a765d2a226d81ae3095fa4916/.tmp/C/095a780f6e0c48b4a905ed5c12f1da55 is 50, key is test_row_0/C:col10/1734371860603/Put/seqid=0 2024-12-16T17:57:41,045 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41817 is added to blk_1073742128_1304 (size=12151) 2024-12-16T17:57:41,048 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39733 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9edd584a765d2a226d81ae3095fa4916, server=3609ad07831c,39733,1734371789085 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-16T17:57:41,049 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=17.89 KB at sequenceid=245 (bloomFilter=true), to=hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/9edd584a765d2a226d81ae3095fa4916/.tmp/C/095a780f6e0c48b4a905ed5c12f1da55 2024-12-16T17:57:41,049 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39733 {}] ipc.CallRunner(138): callId: 91 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45484 deadline: 1734371921047, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9edd584a765d2a226d81ae3095fa4916, server=3609ad07831c,39733,1734371789085 2024-12-16T17:57:41,051 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39733 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9edd584a765d2a226d81ae3095fa4916, server=3609ad07831c,39733,1734371789085 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-16T17:57:41,051 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39733 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9edd584a765d2a226d81ae3095fa4916, server=3609ad07831c,39733,1734371789085 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-16T17:57:41,051 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39733 {}] ipc.CallRunner(138): callId: 90 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45486 deadline: 1734371921050, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9edd584a765d2a226d81ae3095fa4916, server=3609ad07831c,39733,1734371789085 2024-12-16T17:57:41,051 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39733 {}] ipc.CallRunner(138): callId: 106 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45454 deadline: 1734371921050, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9edd584a765d2a226d81ae3095fa4916, server=3609ad07831c,39733,1734371789085 2024-12-16T17:57:41,057 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/9edd584a765d2a226d81ae3095fa4916/.tmp/A/b11d7a7e1143462aa782022df054b4c8 as hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/9edd584a765d2a226d81ae3095fa4916/A/b11d7a7e1143462aa782022df054b4c8 2024-12-16T17:57:41,063 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added 
hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/9edd584a765d2a226d81ae3095fa4916/A/b11d7a7e1143462aa782022df054b4c8, entries=150, sequenceid=245, filesize=11.9 K 2024-12-16T17:57:41,064 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/9edd584a765d2a226d81ae3095fa4916/.tmp/B/9553250f5ffd4b1a828dee0e8e6842b7 as hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/9edd584a765d2a226d81ae3095fa4916/B/9553250f5ffd4b1a828dee0e8e6842b7 2024-12-16T17:57:41,074 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/9edd584a765d2a226d81ae3095fa4916/B/9553250f5ffd4b1a828dee0e8e6842b7, entries=150, sequenceid=245, filesize=11.9 K 2024-12-16T17:57:41,075 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/9edd584a765d2a226d81ae3095fa4916/.tmp/C/095a780f6e0c48b4a905ed5c12f1da55 as hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/9edd584a765d2a226d81ae3095fa4916/C/095a780f6e0c48b4a905ed5c12f1da55 2024-12-16T17:57:41,079 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/9edd584a765d2a226d81ae3095fa4916/C/095a780f6e0c48b4a905ed5c12f1da55, entries=150, sequenceid=245, filesize=11.9 K 2024-12-16T17:57:41,081 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~53.67 KB/54960, heapSize ~141.33 KB/144720, currentSize=154.31 KB/158010 for 9edd584a765d2a226d81ae3095fa4916 in 170ms, sequenceid=245, compaction requested=true 2024-12-16T17:57:41,081 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 9edd584a765d2a226d81ae3095fa4916: 2024-12-16T17:57:41,081 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 9edd584a765d2a226d81ae3095fa4916:A, priority=-2147483648, current under compaction store size is 1 2024-12-16T17:57:41,081 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-16T17:57:41,081 DEBUG [RS:0;3609ad07831c:39733-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-16T17:57:41,081 DEBUG [RS:0;3609ad07831c:39733-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-16T17:57:41,081 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 9edd584a765d2a226d81ae3095fa4916:B, priority=-2147483648, current under compaction store size is 2 2024-12-16T17:57:41,081 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-16T17:57:41,082 DEBUG [MemStoreFlusher.0 {}] 
regionserver.CompactSplit(403): Add compact mark for store 9edd584a765d2a226d81ae3095fa4916:C, priority=-2147483648, current under compaction store size is 3 2024-12-16T17:57:41,082 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-16T17:57:41,082 DEBUG [RS:0;3609ad07831c:39733-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 36931 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-16T17:57:41,082 DEBUG [RS:0;3609ad07831c:39733-longCompactions-0 {}] regionserver.HStore(1540): 9edd584a765d2a226d81ae3095fa4916/B is initiating minor compaction (all files) 2024-12-16T17:57:41,083 INFO [RS:0;3609ad07831c:39733-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 9edd584a765d2a226d81ae3095fa4916/B in TestAcidGuarantees,,1734371848597.9edd584a765d2a226d81ae3095fa4916. 2024-12-16T17:57:41,083 INFO [RS:0;3609ad07831c:39733-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/9edd584a765d2a226d81ae3095fa4916/B/ce3a0ccaa8f44d8c8e9fcff7dd8e0353, hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/9edd584a765d2a226d81ae3095fa4916/B/ee0463a44e2d43c09e26a508f369adc3, hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/9edd584a765d2a226d81ae3095fa4916/B/9553250f5ffd4b1a828dee0e8e6842b7] into tmpdir=hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/9edd584a765d2a226d81ae3095fa4916/.tmp, totalSize=36.1 K 2024-12-16T17:57:41,083 DEBUG [RS:0;3609ad07831c:39733-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 36931 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-16T17:57:41,083 DEBUG [RS:0;3609ad07831c:39733-shortCompactions-0 {}] regionserver.HStore(1540): 9edd584a765d2a226d81ae3095fa4916/A is initiating minor compaction (all files) 2024-12-16T17:57:41,083 INFO [RS:0;3609ad07831c:39733-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 9edd584a765d2a226d81ae3095fa4916/A in TestAcidGuarantees,,1734371848597.9edd584a765d2a226d81ae3095fa4916. 
2024-12-16T17:57:41,083 DEBUG [RS:0;3609ad07831c:39733-longCompactions-0 {}] compactions.Compactor(224): Compacting ce3a0ccaa8f44d8c8e9fcff7dd8e0353, keycount=150, bloomtype=ROW, size=12.3 K, encoding=NONE, compression=NONE, seqNum=204, earliestPutTs=1734371859415 2024-12-16T17:57:41,083 INFO [RS:0;3609ad07831c:39733-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/9edd584a765d2a226d81ae3095fa4916/A/edc8531fd19841ce88fbd9d9d84bffed, hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/9edd584a765d2a226d81ae3095fa4916/A/9cb89fd772694309b6e1711fbeca756f, hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/9edd584a765d2a226d81ae3095fa4916/A/b11d7a7e1143462aa782022df054b4c8] into tmpdir=hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/9edd584a765d2a226d81ae3095fa4916/.tmp, totalSize=36.1 K 2024-12-16T17:57:41,083 DEBUG [RS:0;3609ad07831c:39733-shortCompactions-0 {}] compactions.Compactor(224): Compacting edc8531fd19841ce88fbd9d9d84bffed, keycount=150, bloomtype=ROW, size=12.3 K, encoding=NONE, compression=NONE, seqNum=204, earliestPutTs=1734371859415 2024-12-16T17:57:41,083 DEBUG [RS:0;3609ad07831c:39733-longCompactions-0 {}] compactions.Compactor(224): Compacting ee0463a44e2d43c09e26a508f369adc3, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=231, earliestPutTs=1734371859466 2024-12-16T17:57:41,084 DEBUG [RS:0;3609ad07831c:39733-shortCompactions-0 {}] compactions.Compactor(224): Compacting 9cb89fd772694309b6e1711fbeca756f, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=231, earliestPutTs=1734371859466 2024-12-16T17:57:41,084 DEBUG [RS:0;3609ad07831c:39733-longCompactions-0 {}] compactions.Compactor(224): Compacting 9553250f5ffd4b1a828dee0e8e6842b7, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=245, earliestPutTs=1734371860602 2024-12-16T17:57:41,084 DEBUG [RS:0;3609ad07831c:39733-shortCompactions-0 {}] compactions.Compactor(224): Compacting b11d7a7e1143462aa782022df054b4c8, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=245, earliestPutTs=1734371860602 2024-12-16T17:57:41,105 INFO [RS:0;3609ad07831c:39733-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 9edd584a765d2a226d81ae3095fa4916#B#compaction#258 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-12-16T17:57:41,106 DEBUG [RS:0;3609ad07831c:39733-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/9edd584a765d2a226d81ae3095fa4916/.tmp/B/6bdc432834464006827d507e3ca887a6 is 50, key is test_row_0/B:col10/1734371860603/Put/seqid=0 2024-12-16T17:57:41,108 INFO [RS:0;3609ad07831c:39733-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 9edd584a765d2a226d81ae3095fa4916#A#compaction#259 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-12-16T17:57:41,108 DEBUG [RS:0;3609ad07831c:39733-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/9edd584a765d2a226d81ae3095fa4916/.tmp/A/45dc969387eb488b94a27699e9d0ec80 is 50, key is test_row_0/A:col10/1734371860603/Put/seqid=0 2024-12-16T17:57:41,114 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41817 is added to blk_1073742129_1305 (size=12731) 2024-12-16T17:57:41,134 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41817 is added to blk_1073742130_1306 (size=12731) 2024-12-16T17:57:41,139 DEBUG [RS:0;3609ad07831c:39733-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/9edd584a765d2a226d81ae3095fa4916/.tmp/A/45dc969387eb488b94a27699e9d0ec80 as hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/9edd584a765d2a226d81ae3095fa4916/A/45dc969387eb488b94a27699e9d0ec80 2024-12-16T17:57:41,143 INFO [RS:0;3609ad07831c:39733-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 9edd584a765d2a226d81ae3095fa4916/A of 9edd584a765d2a226d81ae3095fa4916 into 45dc969387eb488b94a27699e9d0ec80(size=12.4 K), total size for store is 12.4 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-12-16T17:57:41,143 DEBUG [RS:0;3609ad07831c:39733-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 9edd584a765d2a226d81ae3095fa4916: 2024-12-16T17:57:41,143 INFO [RS:0;3609ad07831c:39733-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1734371848597.9edd584a765d2a226d81ae3095fa4916., storeName=9edd584a765d2a226d81ae3095fa4916/A, priority=13, startTime=1734371861081; duration=0sec 2024-12-16T17:57:41,143 DEBUG [RS:0;3609ad07831c:39733-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-16T17:57:41,143 DEBUG [RS:0;3609ad07831c:39733-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 9edd584a765d2a226d81ae3095fa4916:A 2024-12-16T17:57:41,143 DEBUG [RS:0;3609ad07831c:39733-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-16T17:57:41,145 DEBUG [RS:0;3609ad07831c:39733-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 36931 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-16T17:57:41,145 DEBUG [RS:0;3609ad07831c:39733-shortCompactions-0 {}] regionserver.HStore(1540): 9edd584a765d2a226d81ae3095fa4916/C is initiating minor compaction (all files) 2024-12-16T17:57:41,145 INFO [RS:0;3609ad07831c:39733-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 9edd584a765d2a226d81ae3095fa4916/C in TestAcidGuarantees,,1734371848597.9edd584a765d2a226d81ae3095fa4916. 
2024-12-16T17:57:41,145 INFO [RS:0;3609ad07831c:39733-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/9edd584a765d2a226d81ae3095fa4916/C/595f76a63fda4ee3940e2a21d1fcc3e0, hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/9edd584a765d2a226d81ae3095fa4916/C/9672b275fd17443a9333307601bcf329, hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/9edd584a765d2a226d81ae3095fa4916/C/095a780f6e0c48b4a905ed5c12f1da55] into tmpdir=hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/9edd584a765d2a226d81ae3095fa4916/.tmp, totalSize=36.1 K 2024-12-16T17:57:41,145 DEBUG [RS:0;3609ad07831c:39733-shortCompactions-0 {}] compactions.Compactor(224): Compacting 595f76a63fda4ee3940e2a21d1fcc3e0, keycount=150, bloomtype=ROW, size=12.3 K, encoding=NONE, compression=NONE, seqNum=204, earliestPutTs=1734371859415 2024-12-16T17:57:41,146 DEBUG [RS:0;3609ad07831c:39733-shortCompactions-0 {}] compactions.Compactor(224): Compacting 9672b275fd17443a9333307601bcf329, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=231, earliestPutTs=1734371859466 2024-12-16T17:57:41,146 DEBUG [RS:0;3609ad07831c:39733-shortCompactions-0 {}] compactions.Compactor(224): Compacting 095a780f6e0c48b4a905ed5c12f1da55, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=245, earliestPutTs=1734371860602 2024-12-16T17:57:41,151 INFO [RS:0;3609ad07831c:39733-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 9edd584a765d2a226d81ae3095fa4916#C#compaction#260 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-12-16T17:57:41,152 DEBUG [RS:0;3609ad07831c:39733-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/9edd584a765d2a226d81ae3095fa4916/.tmp/C/fde1ef3e633045e8be14cc3d0e2c6196 is 50, key is test_row_0/C:col10/1734371860603/Put/seqid=0 2024-12-16T17:57:41,156 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41817 is added to blk_1073742131_1307 (size=12731) 2024-12-16T17:57:41,161 DEBUG [RS:0;3609ad07831c:39733-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/9edd584a765d2a226d81ae3095fa4916/.tmp/C/fde1ef3e633045e8be14cc3d0e2c6196 as hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/9edd584a765d2a226d81ae3095fa4916/C/fde1ef3e633045e8be14cc3d0e2c6196 2024-12-16T17:57:41,165 INFO [RS:0;3609ad07831c:39733-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 9edd584a765d2a226d81ae3095fa4916/C of 9edd584a765d2a226d81ae3095fa4916 into fde1ef3e633045e8be14cc3d0e2c6196(size=12.4 K), total size for store is 12.4 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-12-16T17:57:41,165 DEBUG [RS:0;3609ad07831c:39733-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 9edd584a765d2a226d81ae3095fa4916: 2024-12-16T17:57:41,165 INFO [RS:0;3609ad07831c:39733-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1734371848597.9edd584a765d2a226d81ae3095fa4916., storeName=9edd584a765d2a226d81ae3095fa4916/C, priority=13, startTime=1734371861082; duration=0sec 2024-12-16T17:57:41,165 DEBUG [RS:0;3609ad07831c:39733-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-16T17:57:41,165 DEBUG [RS:0;3609ad07831c:39733-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 9edd584a765d2a226d81ae3095fa4916:C 2024-12-16T17:57:41,252 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39733 {}] regionserver.HRegion(8581): Flush requested on 9edd584a765d2a226d81ae3095fa4916 2024-12-16T17:57:41,252 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 9edd584a765d2a226d81ae3095fa4916 3/3 column families, dataSize=161.02 KB heapSize=422.63 KB 2024-12-16T17:57:41,252 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 9edd584a765d2a226d81ae3095fa4916, store=A 2024-12-16T17:57:41,252 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-16T17:57:41,252 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 9edd584a765d2a226d81ae3095fa4916, store=B 2024-12-16T17:57:41,252 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-16T17:57:41,252 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 9edd584a765d2a226d81ae3095fa4916, store=C 2024-12-16T17:57:41,252 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-16T17:57:41,256 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/9edd584a765d2a226d81ae3095fa4916/.tmp/A/f854a9e208b941ccafc3f0cb3cad46f4 is 50, key is test_row_0/A:col10/1734371860944/Put/seqid=0 2024-12-16T17:57:41,259 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41817 is added to blk_1073742132_1308 (size=12301) 2024-12-16T17:57:41,262 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39733 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9edd584a765d2a226d81ae3095fa4916, server=3609ad07831c,39733,1734371789085 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-16T17:57:41,262 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39733 {}] ipc.CallRunner(138): callId: 110 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45454 deadline: 1734371921259, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9edd584a765d2a226d81ae3095fa4916, server=3609ad07831c,39733,1734371789085 2024-12-16T17:57:41,262 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39733 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9edd584a765d2a226d81ae3095fa4916, server=3609ad07831c,39733,1734371789085 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-16T17:57:41,263 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39733 {}] ipc.CallRunner(138): callId: 94 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45486 deadline: 1734371921260, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9edd584a765d2a226d81ae3095fa4916, server=3609ad07831c,39733,1734371789085 2024-12-16T17:57:41,263 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39733 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9edd584a765d2a226d81ae3095fa4916, server=3609ad07831c,39733,1734371789085 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-16T17:57:41,263 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39733 {}] ipc.CallRunner(138): callId: 97 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45484 deadline: 1734371921260, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9edd584a765d2a226d81ae3095fa4916, server=3609ad07831c,39733,1734371789085 2024-12-16T17:57:41,365 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39733 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9edd584a765d2a226d81ae3095fa4916, server=3609ad07831c,39733,1734371789085 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-16T17:57:41,365 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39733 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9edd584a765d2a226d81ae3095fa4916, server=3609ad07831c,39733,1734371789085 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-16T17:57:41,365 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39733 {}] ipc.CallRunner(138): callId: 96 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45486 deadline: 1734371921364, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9edd584a765d2a226d81ae3095fa4916, server=3609ad07831c,39733,1734371789085 2024-12-16T17:57:41,365 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39733 {}] ipc.CallRunner(138): callId: 99 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45484 deadline: 1734371921364, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9edd584a765d2a226d81ae3095fa4916, server=3609ad07831c,39733,1734371789085 2024-12-16T17:57:41,365 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39733 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9edd584a765d2a226d81ae3095fa4916, server=3609ad07831c,39733,1734371789085 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-16T17:57:41,365 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39733 {}] ipc.CallRunner(138): callId: 112 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45454 deadline: 1734371921364, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9edd584a765d2a226d81ae3095fa4916, server=3609ad07831c,39733,1734371789085 2024-12-16T17:57:41,520 DEBUG [RS:0;3609ad07831c:39733-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/9edd584a765d2a226d81ae3095fa4916/.tmp/B/6bdc432834464006827d507e3ca887a6 as hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/9edd584a765d2a226d81ae3095fa4916/B/6bdc432834464006827d507e3ca887a6 2024-12-16T17:57:41,526 INFO [RS:0;3609ad07831c:39733-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 9edd584a765d2a226d81ae3095fa4916/B of 9edd584a765d2a226d81ae3095fa4916 into 6bdc432834464006827d507e3ca887a6(size=12.4 K), total size for store is 12.4 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-12-16T17:57:41,526 DEBUG [RS:0;3609ad07831c:39733-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 9edd584a765d2a226d81ae3095fa4916: 2024-12-16T17:57:41,526 INFO [RS:0;3609ad07831c:39733-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1734371848597.9edd584a765d2a226d81ae3095fa4916., storeName=9edd584a765d2a226d81ae3095fa4916/B, priority=13, startTime=1734371861081; duration=0sec 2024-12-16T17:57:41,526 DEBUG [RS:0;3609ad07831c:39733-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-16T17:57:41,526 DEBUG [RS:0;3609ad07831c:39733-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 9edd584a765d2a226d81ae3095fa4916:B 2024-12-16T17:57:41,566 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39733 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9edd584a765d2a226d81ae3095fa4916, server=3609ad07831c,39733,1734371789085 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-16T17:57:41,567 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39733 {}] ipc.CallRunner(138): callId: 101 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45484 deadline: 1734371921566, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9edd584a765d2a226d81ae3095fa4916, server=3609ad07831c,39733,1734371789085 2024-12-16T17:57:41,567 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39733 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9edd584a765d2a226d81ae3095fa4916, server=3609ad07831c,39733,1734371789085 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-16T17:57:41,567 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39733 {}] ipc.CallRunner(138): callId: 98 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45486 deadline: 1734371921566, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9edd584a765d2a226d81ae3095fa4916, server=3609ad07831c,39733,1734371789085 2024-12-16T17:57:41,568 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38367 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=69 2024-12-16T17:57:41,568 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39733 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9edd584a765d2a226d81ae3095fa4916, server=3609ad07831c,39733,1734371789085 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-16T17:57:41,568 INFO [Thread-1171 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 69 completed 2024-12-16T17:57:41,568 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39733 {}] ipc.CallRunner(138): callId: 114 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45454 deadline: 1734371921567, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9edd584a765d2a226d81ae3095fa4916, server=3609ad07831c,39733,1734371789085 2024-12-16T17:57:41,570 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38367 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-12-16T17:57:41,571 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38367 {}] procedure2.ProcedureExecutor(1098): Stored pid=71, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=71, table=TestAcidGuarantees 2024-12-16T17:57:41,571 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38367 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=71 2024-12-16T17:57:41,572 INFO [PEWorker-2 {}] procedure.FlushTableProcedure(91): pid=71, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=71, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-12-16T17:57:41,572 INFO [PEWorker-2 {}] procedure.FlushTableProcedure(91): pid=71, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=71, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-12-16T17:57:41,572 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=72, ppid=71, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-12-16T17:57:41,661 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=53.67 KB at sequenceid=274 (bloomFilter=true), 
to=hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/9edd584a765d2a226d81ae3095fa4916/.tmp/A/f854a9e208b941ccafc3f0cb3cad46f4 2024-12-16T17:57:41,667 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/9edd584a765d2a226d81ae3095fa4916/.tmp/B/8fbe6a980a704242825fab6bfa36f48b is 50, key is test_row_0/B:col10/1734371860944/Put/seqid=0 2024-12-16T17:57:41,670 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41817 is added to blk_1073742133_1309 (size=12301) 2024-12-16T17:57:41,672 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38367 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=71 2024-12-16T17:57:41,723 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 3609ad07831c,39733,1734371789085 2024-12-16T17:57:41,724 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=39733 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=72 2024-12-16T17:57:41,724 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-0 {event_type=RS_FLUSH_REGIONS, pid=72}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1734371848597.9edd584a765d2a226d81ae3095fa4916. 2024-12-16T17:57:41,724 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-0 {event_type=RS_FLUSH_REGIONS, pid=72}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1734371848597.9edd584a765d2a226d81ae3095fa4916. as already flushing 2024-12-16T17:57:41,724 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-0 {event_type=RS_FLUSH_REGIONS, pid=72}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1734371848597.9edd584a765d2a226d81ae3095fa4916. 2024-12-16T17:57:41,724 ERROR [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-0 {event_type=RS_FLUSH_REGIONS, pid=72}] handler.RSProcedureHandler(58): pid=72 java.io.IOException: Unable to complete flush {ENCODED => 9edd584a765d2a226d81ae3095fa4916, NAME => 'TestAcidGuarantees,,1734371848597.9edd584a765d2a226d81ae3095fa4916.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
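The pid=71/72 records above are an admin-requested flush of the whole table colliding with the MemStoreFlusher flush that is already in progress, which is why FlushRegionCallable reports "NOT flushing ... as already flushing" and fails with "Unable to complete flush". On the client side that request is a single Admin call; a minimal sketch, assuming an already-open Connection named conn (an assumption, not visible in the log):

    import java.io.IOException;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.Connection;

    public class RequestFlush {
      // Client-side equivalent of the "Client=jenkins ... flush TestAcidGuarantees" records.
      static void flushTable(Connection conn) throws IOException {
        try (Admin admin = conn.getAdmin()) {
          // Submits a flush procedure on the master and waits for it to complete;
          // the FlushTableProcedure / FlushRegionProcedure entries above
          // (pid=71, pid=72) are the server half of this call, and the
          // HBaseAdmin$TableFuture "Operation: FLUSH ... completed" record is
          // the client-side wait finishing.
          admin.flush(TableName.valueOf("TestAcidGuarantees"));
        }
      }
    }

As the later records show, the master keeps re-dispatching the FlushRegionProcedure (pid=72) after each failure until the in-flight flush finishes, at which point the admin-requested flush can proceed.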
2024-12-16T17:57:41,724 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-0 {event_type=RS_FLUSH_REGIONS, pid=72}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=72 java.io.IOException: Unable to complete flush {ENCODED => 9edd584a765d2a226d81ae3095fa4916, NAME => 'TestAcidGuarantees,,1734371848597.9edd584a765d2a226d81ae3095fa4916.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-16T17:57:41,725 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38367 {}] master.HMaster(4114): Remote procedure failed, pid=72 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 9edd584a765d2a226d81ae3095fa4916, NAME => 'TestAcidGuarantees,,1734371848597.9edd584a765d2a226d81ae3095fa4916.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 9edd584a765d2a226d81ae3095fa4916, NAME => 'TestAcidGuarantees,,1734371848597.9edd584a765d2a226d81ae3095fa4916.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-16T17:57:41,869 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39733 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9edd584a765d2a226d81ae3095fa4916, server=3609ad07831c,39733,1734371789085 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-16T17:57:41,870 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39733 {}] ipc.CallRunner(138): callId: 100 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45486 deadline: 1734371921868, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9edd584a765d2a226d81ae3095fa4916, server=3609ad07831c,39733,1734371789085 2024-12-16T17:57:41,870 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39733 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9edd584a765d2a226d81ae3095fa4916, server=3609ad07831c,39733,1734371789085 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-16T17:57:41,870 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39733 {}] ipc.CallRunner(138): callId: 116 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45454 deadline: 1734371921869, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9edd584a765d2a226d81ae3095fa4916, server=3609ad07831c,39733,1734371789085 2024-12-16T17:57:41,871 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39733 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9edd584a765d2a226d81ae3095fa4916, server=3609ad07831c,39733,1734371789085 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-16T17:57:41,871 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39733 {}] ipc.CallRunner(138): callId: 103 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45484 deadline: 1734371921871, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9edd584a765d2a226d81ae3095fa4916, server=3609ad07831c,39733,1734371789085 2024-12-16T17:57:41,872 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38367 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=71 2024-12-16T17:57:41,876 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 3609ad07831c,39733,1734371789085 2024-12-16T17:57:41,877 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=39733 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=72 2024-12-16T17:57:41,877 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-1 {event_type=RS_FLUSH_REGIONS, pid=72}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1734371848597.9edd584a765d2a226d81ae3095fa4916. 2024-12-16T17:57:41,877 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-1 {event_type=RS_FLUSH_REGIONS, pid=72}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1734371848597.9edd584a765d2a226d81ae3095fa4916. as already flushing 2024-12-16T17:57:41,877 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-1 {event_type=RS_FLUSH_REGIONS, pid=72}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1734371848597.9edd584a765d2a226d81ae3095fa4916. 2024-12-16T17:57:41,877 ERROR [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-1 {event_type=RS_FLUSH_REGIONS, pid=72}] handler.RSProcedureHandler(58): pid=72 java.io.IOException: Unable to complete flush {ENCODED => 9edd584a765d2a226d81ae3095fa4916, NAME => 'TestAcidGuarantees,,1734371848597.9edd584a765d2a226d81ae3095fa4916.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
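The repeated RegionTooBusyException rejections above are the region server refusing Mutate calls while the region's memstore is over its 512.0 K blocking limit; writers are expected to back off and retry once the flush drains the memstore. The HBase client normally handles this through its own retry policy (and may surface the failure wrapped in a retries-exhausted error), so the following is only a hand-rolled sketch with assumed names, not the test's actual write path:

    import java.io.IOException;
    import org.apache.hadoop.hbase.RegionTooBusyException;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.Put;
    import org.apache.hadoop.hbase.client.Table;

    public class PutWithBackoff {
      // Retry a put a few times with exponential backoff when the region
      // reports that its memstore is over the blocking limit.
      static void putWithBackoff(Connection conn, Put put)
          throws IOException, InterruptedException {
        try (Table table = conn.getTable(TableName.valueOf("TestAcidGuarantees"))) {
          long backoffMs = 100;
          for (int attempt = 0; ; attempt++) {
            try {
              table.put(put);
              return;                      // write accepted
            } catch (RegionTooBusyException e) {
              if (attempt >= 5) {
                throw e;                   // give up after a few attempts
              }
              Thread.sleep(backoffMs);     // give the flush time to drain the memstore
              backoffMs *= 2;
            }
          }
        }
      }
    }

A caller would build the mutation along the lines of new Put(Bytes.toBytes("test_row_0")).addColumn(Bytes.toBytes("A"), Bytes.toBytes("col10"), value) and pass it in.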
2024-12-16T17:57:41,877 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-1 {event_type=RS_FLUSH_REGIONS, pid=72}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=72 java.io.IOException: Unable to complete flush {ENCODED => 9edd584a765d2a226d81ae3095fa4916, NAME => 'TestAcidGuarantees,,1734371848597.9edd584a765d2a226d81ae3095fa4916.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-16T17:57:41,878 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38367 {}] master.HMaster(4114): Remote procedure failed, pid=72 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 9edd584a765d2a226d81ae3095fa4916, NAME => 'TestAcidGuarantees,,1734371848597.9edd584a765d2a226d81ae3095fa4916.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 9edd584a765d2a226d81ae3095fa4916, NAME => 'TestAcidGuarantees,,1734371848597.9edd584a765d2a226d81ae3095fa4916.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-16T17:57:42,030 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 3609ad07831c,39733,1734371789085 2024-12-16T17:57:42,031 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=39733 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=72 2024-12-16T17:57:42,031 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-2 {event_type=RS_FLUSH_REGIONS, pid=72}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1734371848597.9edd584a765d2a226d81ae3095fa4916. 2024-12-16T17:57:42,031 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-2 {event_type=RS_FLUSH_REGIONS, pid=72}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1734371848597.9edd584a765d2a226d81ae3095fa4916. as already flushing 2024-12-16T17:57:42,031 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-2 {event_type=RS_FLUSH_REGIONS, pid=72}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1734371848597.9edd584a765d2a226d81ae3095fa4916. 2024-12-16T17:57:42,031 ERROR [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-2 {event_type=RS_FLUSH_REGIONS, pid=72}] handler.RSProcedureHandler(58): pid=72 java.io.IOException: Unable to complete flush {ENCODED => 9edd584a765d2a226d81ae3095fa4916, NAME => 'TestAcidGuarantees,,1734371848597.9edd584a765d2a226d81ae3095fa4916.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-16T17:57:42,031 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-2 {event_type=RS_FLUSH_REGIONS, pid=72}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=72 java.io.IOException: Unable to complete flush {ENCODED => 9edd584a765d2a226d81ae3095fa4916, NAME => 'TestAcidGuarantees,,1734371848597.9edd584a765d2a226d81ae3095fa4916.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-16T17:57:42,032 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38367 {}] master.HMaster(4114): Remote procedure failed, pid=72 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 9edd584a765d2a226d81ae3095fa4916, NAME => 'TestAcidGuarantees,,1734371848597.9edd584a765d2a226d81ae3095fa4916.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 9edd584a765d2a226d81ae3095fa4916, NAME => 'TestAcidGuarantees,,1734371848597.9edd584a765d2a226d81ae3095fa4916.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
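For reference, the 512.0 K figure in the "Over memstore limit" messages is the region's blocking memstore size, derived from hbase.hregion.memstore.flush.size multiplied by hbase.hregion.memstore.block.multiplier and checked in HRegion.checkResources before each put (the checkResources frame appears in every stack trace above). A sketch of a configuration that would produce this limit; the concrete values are illustrative and the test's own settings are not shown in this log:

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;

    public class MemstoreLimits {
      // Illustrative values only: flush at 128 KB and block writes at
      // 4 x 128 KB = 512 KB, matching the "Over memstore limit=512.0 K"
      // messages above.
      static Configuration smallMemstoreConf() {
        Configuration conf = HBaseConfiguration.create();
        conf.setLong("hbase.hregion.memstore.flush.size", 128 * 1024);
        conf.setInt("hbase.hregion.memstore.block.multiplier", 4);
        return conf;
      }
    }

With limits this small, a region starts rejecting writes as soon as it accumulates 512 KB of unflushed data, which is exactly the pattern that fills the remainder of this log.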
2024-12-16T17:57:42,071 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=53.67 KB at sequenceid=274 (bloomFilter=true), to=hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/9edd584a765d2a226d81ae3095fa4916/.tmp/B/8fbe6a980a704242825fab6bfa36f48b 2024-12-16T17:57:42,082 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/9edd584a765d2a226d81ae3095fa4916/.tmp/C/6b41f05d800749ac8b0f822120c2bc6f is 50, key is test_row_0/C:col10/1734371860944/Put/seqid=0 2024-12-16T17:57:42,110 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41817 is added to blk_1073742134_1310 (size=12301) 2024-12-16T17:57:42,116 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=53.67 KB at sequenceid=274 (bloomFilter=true), to=hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/9edd584a765d2a226d81ae3095fa4916/.tmp/C/6b41f05d800749ac8b0f822120c2bc6f 2024-12-16T17:57:42,123 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/9edd584a765d2a226d81ae3095fa4916/.tmp/A/f854a9e208b941ccafc3f0cb3cad46f4 as hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/9edd584a765d2a226d81ae3095fa4916/A/f854a9e208b941ccafc3f0cb3cad46f4 2024-12-16T17:57:42,134 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/9edd584a765d2a226d81ae3095fa4916/A/f854a9e208b941ccafc3f0cb3cad46f4, entries=150, sequenceid=274, filesize=12.0 K 2024-12-16T17:57:42,135 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/9edd584a765d2a226d81ae3095fa4916/.tmp/B/8fbe6a980a704242825fab6bfa36f48b as hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/9edd584a765d2a226d81ae3095fa4916/B/8fbe6a980a704242825fab6bfa36f48b 2024-12-16T17:57:42,140 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/9edd584a765d2a226d81ae3095fa4916/B/8fbe6a980a704242825fab6bfa36f48b, entries=150, sequenceid=274, filesize=12.0 K 2024-12-16T17:57:42,142 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/9edd584a765d2a226d81ae3095fa4916/.tmp/C/6b41f05d800749ac8b0f822120c2bc6f as hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/9edd584a765d2a226d81ae3095fa4916/C/6b41f05d800749ac8b0f822120c2bc6f 2024-12-16T17:57:42,151 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added 
hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/9edd584a765d2a226d81ae3095fa4916/C/6b41f05d800749ac8b0f822120c2bc6f, entries=150, sequenceid=274, filesize=12.0 K 2024-12-16T17:57:42,152 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~161.02 KB/164880, heapSize ~422.58 KB/432720, currentSize=46.96 KB/48090 for 9edd584a765d2a226d81ae3095fa4916 in 900ms, sequenceid=274, compaction requested=false 2024-12-16T17:57:42,152 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 9edd584a765d2a226d81ae3095fa4916: 2024-12-16T17:57:42,173 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38367 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=71 2024-12-16T17:57:42,183 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 3609ad07831c,39733,1734371789085 2024-12-16T17:57:42,184 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=39733 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=72 2024-12-16T17:57:42,184 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-0 {event_type=RS_FLUSH_REGIONS, pid=72}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1734371848597.9edd584a765d2a226d81ae3095fa4916. 2024-12-16T17:57:42,185 INFO [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-0 {event_type=RS_FLUSH_REGIONS, pid=72}] regionserver.HRegion(2837): Flushing 9edd584a765d2a226d81ae3095fa4916 3/3 column families, dataSize=46.96 KB heapSize=123.80 KB 2024-12-16T17:57:42,185 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-0 {event_type=RS_FLUSH_REGIONS, pid=72}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 9edd584a765d2a226d81ae3095fa4916, store=A 2024-12-16T17:57:42,185 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-0 {event_type=RS_FLUSH_REGIONS, pid=72}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-16T17:57:42,185 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-0 {event_type=RS_FLUSH_REGIONS, pid=72}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 9edd584a765d2a226d81ae3095fa4916, store=B 2024-12-16T17:57:42,185 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-0 {event_type=RS_FLUSH_REGIONS, pid=72}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-16T17:57:42,185 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-0 {event_type=RS_FLUSH_REGIONS, pid=72}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 9edd584a765d2a226d81ae3095fa4916, store=C 2024-12-16T17:57:42,185 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-0 {event_type=RS_FLUSH_REGIONS, pid=72}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-16T17:57:42,204 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-0 {event_type=RS_FLUSH_REGIONS, pid=72}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/9edd584a765d2a226d81ae3095fa4916/.tmp/A/163f14959f6448d7b0df2252e5a9d2ea is 50, key is test_row_0/A:col10/1734371861255/Put/seqid=0 2024-12-16T17:57:42,236 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41817 is added to 
blk_1073742135_1311 (size=9857) 2024-12-16T17:57:42,393 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1734371848597.9edd584a765d2a226d81ae3095fa4916. as already flushing 2024-12-16T17:57:42,393 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39733 {}] regionserver.HRegion(8581): Flush requested on 9edd584a765d2a226d81ae3095fa4916 2024-12-16T17:57:42,460 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39733 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9edd584a765d2a226d81ae3095fa4916, server=3609ad07831c,39733,1734371789085 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-16T17:57:42,460 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39733 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9edd584a765d2a226d81ae3095fa4916, server=3609ad07831c,39733,1734371789085 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-16T17:57:42,461 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39733 {}] ipc.CallRunner(138): callId: 110 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45486 deadline: 1734371922455, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9edd584a765d2a226d81ae3095fa4916, server=3609ad07831c,39733,1734371789085 2024-12-16T17:57:42,461 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39733 {}] ipc.CallRunner(138): callId: 112 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45484 deadline: 1734371922455, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9edd584a765d2a226d81ae3095fa4916, server=3609ad07831c,39733,1734371789085 2024-12-16T17:57:42,464 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39733 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9edd584a765d2a226d81ae3095fa4916, server=3609ad07831c,39733,1734371789085 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-16T17:57:42,465 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39733 {}] ipc.CallRunner(138): callId: 126 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45454 deadline: 1734371922461, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9edd584a765d2a226d81ae3095fa4916, server=3609ad07831c,39733,1734371789085 2024-12-16T17:57:42,564 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39733 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9edd584a765d2a226d81ae3095fa4916, server=3609ad07831c,39733,1734371789085 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-16T17:57:42,564 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39733 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9edd584a765d2a226d81ae3095fa4916, server=3609ad07831c,39733,1734371789085 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-16T17:57:42,564 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39733 {}] ipc.CallRunner(138): callId: 112 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45486 deadline: 1734371922562, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9edd584a765d2a226d81ae3095fa4916, server=3609ad07831c,39733,1734371789085 2024-12-16T17:57:42,564 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39733 {}] ipc.CallRunner(138): callId: 114 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45484 deadline: 1734371922562, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9edd584a765d2a226d81ae3095fa4916, server=3609ad07831c,39733,1734371789085 2024-12-16T17:57:42,568 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39733 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9edd584a765d2a226d81ae3095fa4916, server=3609ad07831c,39733,1734371789085 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-16T17:57:42,569 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39733 {}] ipc.CallRunner(138): callId: 128 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45454 deadline: 1734371922566, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9edd584a765d2a226d81ae3095fa4916, server=3609ad07831c,39733,1734371789085 2024-12-16T17:57:42,637 INFO [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-0 {event_type=RS_FLUSH_REGIONS, pid=72}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=15.65 KB at sequenceid=285 (bloomFilter=true), to=hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/9edd584a765d2a226d81ae3095fa4916/.tmp/A/163f14959f6448d7b0df2252e5a9d2ea 2024-12-16T17:57:42,647 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-0 {event_type=RS_FLUSH_REGIONS, pid=72}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/9edd584a765d2a226d81ae3095fa4916/.tmp/B/76d6710c5f0146baa12348d2788626a8 is 50, key is test_row_0/B:col10/1734371861255/Put/seqid=0 2024-12-16T17:57:42,674 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38367 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=71 2024-12-16T17:57:42,682 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41817 is added to blk_1073742136_1312 (size=9857) 2024-12-16T17:57:42,682 INFO [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-0 {event_type=RS_FLUSH_REGIONS, pid=72}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=15.65 KB at sequenceid=285 (bloomFilter=true), to=hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/9edd584a765d2a226d81ae3095fa4916/.tmp/B/76d6710c5f0146baa12348d2788626a8 2024-12-16T17:57:42,691 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-0 {event_type=RS_FLUSH_REGIONS, pid=72}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/9edd584a765d2a226d81ae3095fa4916/.tmp/C/aec5289a7b48477f9919f0da3e6523db is 50, key is test_row_0/C:col10/1734371861255/Put/seqid=0 2024-12-16T17:57:42,695 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41817 is added to blk_1073742137_1313 (size=9857) 2024-12-16T17:57:42,766 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39733 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9edd584a765d2a226d81ae3095fa4916, server=3609ad07831c,39733,1734371789085 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-16T17:57:42,766 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39733 {}] ipc.CallRunner(138): callId: 114 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45486 deadline: 1734371922765, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9edd584a765d2a226d81ae3095fa4916, server=3609ad07831c,39733,1734371789085 2024-12-16T17:57:42,767 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39733 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9edd584a765d2a226d81ae3095fa4916, server=3609ad07831c,39733,1734371789085 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-16T17:57:42,767 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39733 {}] ipc.CallRunner(138): callId: 116 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45484 deadline: 1734371922766, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9edd584a765d2a226d81ae3095fa4916, server=3609ad07831c,39733,1734371789085 2024-12-16T17:57:42,771 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39733 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9edd584a765d2a226d81ae3095fa4916, server=3609ad07831c,39733,1734371789085 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-16T17:57:42,771 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39733 {}] ipc.CallRunner(138): callId: 130 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45454 deadline: 1734371922770, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9edd584a765d2a226d81ae3095fa4916, server=3609ad07831c,39733,1734371789085 2024-12-16T17:57:43,070 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39733 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9edd584a765d2a226d81ae3095fa4916, server=3609ad07831c,39733,1734371789085 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-16T17:57:43,070 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39733 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9edd584a765d2a226d81ae3095fa4916, server=3609ad07831c,39733,1734371789085 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-16T17:57:43,070 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39733 {}] ipc.CallRunner(138): callId: 118 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45484 deadline: 1734371923068, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9edd584a765d2a226d81ae3095fa4916, server=3609ad07831c,39733,1734371789085 2024-12-16T17:57:43,070 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39733 {}] ipc.CallRunner(138): callId: 116 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45486 deadline: 1734371923068, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9edd584a765d2a226d81ae3095fa4916, server=3609ad07831c,39733,1734371789085 2024-12-16T17:57:43,074 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39733 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9edd584a765d2a226d81ae3095fa4916, server=3609ad07831c,39733,1734371789085 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-16T17:57:43,074 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39733 {}] ipc.CallRunner(138): callId: 132 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45454 deadline: 1734371923072, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9edd584a765d2a226d81ae3095fa4916, server=3609ad07831c,39733,1734371789085 2024-12-16T17:57:43,096 INFO [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-0 {event_type=RS_FLUSH_REGIONS, pid=72}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=15.65 KB at sequenceid=285 (bloomFilter=true), to=hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/9edd584a765d2a226d81ae3095fa4916/.tmp/C/aec5289a7b48477f9919f0da3e6523db 2024-12-16T17:57:43,099 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-0 {event_type=RS_FLUSH_REGIONS, pid=72}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/9edd584a765d2a226d81ae3095fa4916/.tmp/A/163f14959f6448d7b0df2252e5a9d2ea as hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/9edd584a765d2a226d81ae3095fa4916/A/163f14959f6448d7b0df2252e5a9d2ea 2024-12-16T17:57:43,103 INFO [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-0 {event_type=RS_FLUSH_REGIONS, pid=72}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/9edd584a765d2a226d81ae3095fa4916/A/163f14959f6448d7b0df2252e5a9d2ea, entries=100, sequenceid=285, filesize=9.6 K 2024-12-16T17:57:43,103 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-0 {event_type=RS_FLUSH_REGIONS, pid=72}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/9edd584a765d2a226d81ae3095fa4916/.tmp/B/76d6710c5f0146baa12348d2788626a8 as 
hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/9edd584a765d2a226d81ae3095fa4916/B/76d6710c5f0146baa12348d2788626a8 2024-12-16T17:57:43,106 INFO [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-0 {event_type=RS_FLUSH_REGIONS, pid=72}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/9edd584a765d2a226d81ae3095fa4916/B/76d6710c5f0146baa12348d2788626a8, entries=100, sequenceid=285, filesize=9.6 K 2024-12-16T17:57:43,107 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-0 {event_type=RS_FLUSH_REGIONS, pid=72}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/9edd584a765d2a226d81ae3095fa4916/.tmp/C/aec5289a7b48477f9919f0da3e6523db as hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/9edd584a765d2a226d81ae3095fa4916/C/aec5289a7b48477f9919f0da3e6523db 2024-12-16T17:57:43,110 INFO [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-0 {event_type=RS_FLUSH_REGIONS, pid=72}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/9edd584a765d2a226d81ae3095fa4916/C/aec5289a7b48477f9919f0da3e6523db, entries=100, sequenceid=285, filesize=9.6 K 2024-12-16T17:57:43,111 INFO [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-0 {event_type=RS_FLUSH_REGIONS, pid=72}] regionserver.HRegion(3040): Finished flush of dataSize ~46.96 KB/48090, heapSize ~123.75 KB/126720, currentSize=154.31 KB/158010 for 9edd584a765d2a226d81ae3095fa4916 in 927ms, sequenceid=285, compaction requested=true 2024-12-16T17:57:43,111 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-0 {event_type=RS_FLUSH_REGIONS, pid=72}] regionserver.HRegion(2538): Flush status journal for 9edd584a765d2a226d81ae3095fa4916: 2024-12-16T17:57:43,111 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-0 {event_type=RS_FLUSH_REGIONS, pid=72}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1734371848597.9edd584a765d2a226d81ae3095fa4916. 
2024-12-16T17:57:43,111 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-0 {event_type=RS_FLUSH_REGIONS, pid=72}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=72 2024-12-16T17:57:43,111 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38367 {}] master.HMaster(4106): Remote procedure done, pid=72 2024-12-16T17:57:43,113 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=72, resume processing ppid=71 2024-12-16T17:57:43,113 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=72, ppid=71, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 1.5400 sec 2024-12-16T17:57:43,117 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=71, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=71, table=TestAcidGuarantees in 1.5430 sec 2024-12-16T17:57:43,572 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39733 {}] regionserver.HRegion(8581): Flush requested on 9edd584a765d2a226d81ae3095fa4916 2024-12-16T17:57:43,572 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 9edd584a765d2a226d81ae3095fa4916 3/3 column families, dataSize=161.02 KB heapSize=422.63 KB 2024-12-16T17:57:43,572 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 9edd584a765d2a226d81ae3095fa4916, store=A 2024-12-16T17:57:43,572 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-16T17:57:43,572 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 9edd584a765d2a226d81ae3095fa4916, store=B 2024-12-16T17:57:43,573 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-16T17:57:43,573 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 9edd584a765d2a226d81ae3095fa4916, store=C 2024-12-16T17:57:43,573 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-16T17:57:43,577 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/9edd584a765d2a226d81ae3095fa4916/.tmp/A/e9748a47b8b54251beadee6f5240470f is 50, key is test_row_0/A:col10/1734371862453/Put/seqid=0 2024-12-16T17:57:43,580 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39733 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9edd584a765d2a226d81ae3095fa4916, server=3609ad07831c,39733,1734371789085 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-16T17:57:43,580 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39733 {}] ipc.CallRunner(138): callId: 135 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45454 deadline: 1734371923578, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9edd584a765d2a226d81ae3095fa4916, server=3609ad07831c,39733,1734371789085 2024-12-16T17:57:43,580 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39733 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9edd584a765d2a226d81ae3095fa4916, server=3609ad07831c,39733,1734371789085 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-16T17:57:43,580 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39733 {}] ipc.CallRunner(138): callId: 122 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45484 deadline: 1734371923578, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9edd584a765d2a226d81ae3095fa4916, server=3609ad07831c,39733,1734371789085 2024-12-16T17:57:43,581 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41817 is added to blk_1073742138_1314 (size=12301) 2024-12-16T17:57:43,581 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=53.67 KB at sequenceid=312 (bloomFilter=true), to=hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/9edd584a765d2a226d81ae3095fa4916/.tmp/A/e9748a47b8b54251beadee6f5240470f 2024-12-16T17:57:43,582 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39733 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9edd584a765d2a226d81ae3095fa4916, server=3609ad07831c,39733,1734371789085 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-16T17:57:43,582 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39733 {}] ipc.CallRunner(138): callId: 122 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45486 deadline: 1734371923579, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9edd584a765d2a226d81ae3095fa4916, server=3609ad07831c,39733,1734371789085 2024-12-16T17:57:43,588 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/9edd584a765d2a226d81ae3095fa4916/.tmp/B/f767d2bea540449ba3f4c73b747c58ab is 50, key is test_row_0/B:col10/1734371862453/Put/seqid=0 2024-12-16T17:57:43,598 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41817 is added to blk_1073742139_1315 (size=12301) 2024-12-16T17:57:43,599 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=53.67 KB at sequenceid=312 (bloomFilter=true), to=hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/9edd584a765d2a226d81ae3095fa4916/.tmp/B/f767d2bea540449ba3f4c73b747c58ab 2024-12-16T17:57:43,606 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/9edd584a765d2a226d81ae3095fa4916/.tmp/C/3b0ca63b404f441ba0092f577479dd1a is 50, key is test_row_0/C:col10/1734371862453/Put/seqid=0 2024-12-16T17:57:43,622 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41817 is added to blk_1073742140_1316 (size=12301) 2024-12-16T17:57:43,675 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38367 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=71 2024-12-16T17:57:43,675 INFO [Thread-1171 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 71 completed 2024-12-16T17:57:43,677 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38367 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-12-16T17:57:43,678 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38367 {}] procedure2.ProcedureExecutor(1098): Stored pid=73, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=73, table=TestAcidGuarantees 2024-12-16T17:57:43,679 INFO [PEWorker-3 {}] procedure.FlushTableProcedure(91): pid=73, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=73, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-12-16T17:57:43,679 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38367 {}] master.MasterRpcServices(1305): Checking to 
see if procedure is done pid=73 2024-12-16T17:57:43,679 INFO [PEWorker-3 {}] procedure.FlushTableProcedure(91): pid=73, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=73, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-12-16T17:57:43,679 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=74, ppid=73, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-12-16T17:57:43,683 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39733 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9edd584a765d2a226d81ae3095fa4916, server=3609ad07831c,39733,1734371789085 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-16T17:57:43,683 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39733 {}] ipc.CallRunner(138): callId: 137 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45454 deadline: 1734371923681, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9edd584a765d2a226d81ae3095fa4916, server=3609ad07831c,39733,1734371789085 2024-12-16T17:57:43,683 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39733 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9edd584a765d2a226d81ae3095fa4916, server=3609ad07831c,39733,1734371789085 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-16T17:57:43,684 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39733 {}] ipc.CallRunner(138): callId: 124 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45484 deadline: 1734371923681, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9edd584a765d2a226d81ae3095fa4916, server=3609ad07831c,39733,1734371789085 2024-12-16T17:57:43,684 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39733 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9edd584a765d2a226d81ae3095fa4916, server=3609ad07831c,39733,1734371789085 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-16T17:57:43,684 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39733 {}] ipc.CallRunner(138): callId: 124 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45486 deadline: 1734371923683, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9edd584a765d2a226d81ae3095fa4916, server=3609ad07831c,39733,1734371789085 2024-12-16T17:57:43,780 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38367 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=73 2024-12-16T17:57:43,831 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 3609ad07831c,39733,1734371789085 2024-12-16T17:57:43,831 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=39733 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=74 2024-12-16T17:57:43,831 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-1 {event_type=RS_FLUSH_REGIONS, pid=74}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1734371848597.9edd584a765d2a226d81ae3095fa4916. 2024-12-16T17:57:43,831 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-1 {event_type=RS_FLUSH_REGIONS, pid=74}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1734371848597.9edd584a765d2a226d81ae3095fa4916. as already flushing 2024-12-16T17:57:43,832 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-1 {event_type=RS_FLUSH_REGIONS, pid=74}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1734371848597.9edd584a765d2a226d81ae3095fa4916. 2024-12-16T17:57:43,832 ERROR [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-1 {event_type=RS_FLUSH_REGIONS, pid=74}] handler.RSProcedureHandler(58): pid=74 java.io.IOException: Unable to complete flush {ENCODED => 9edd584a765d2a226d81ae3095fa4916, NAME => 'TestAcidGuarantees,,1734371848597.9edd584a765d2a226d81ae3095fa4916.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-16T17:57:43,832 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-1 {event_type=RS_FLUSH_REGIONS, pid=74}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=74 java.io.IOException: Unable to complete flush {ENCODED => 9edd584a765d2a226d81ae3095fa4916, NAME => 'TestAcidGuarantees,,1734371848597.9edd584a765d2a226d81ae3095fa4916.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-16T17:57:43,832 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38367 {}] master.HMaster(4114): Remote procedure failed, pid=74 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 9edd584a765d2a226d81ae3095fa4916, NAME => 'TestAcidGuarantees,,1734371848597.9edd584a765d2a226d81ae3095fa4916.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 9edd584a765d2a226d81ae3095fa4916, NAME => 'TestAcidGuarantees,,1734371848597.9edd584a765d2a226d81ae3095fa4916.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-16T17:57:43,886 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39733 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9edd584a765d2a226d81ae3095fa4916, server=3609ad07831c,39733,1734371789085 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-16T17:57:43,887 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39733 {}] ipc.CallRunner(138): callId: 126 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45484 deadline: 1734371923885, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9edd584a765d2a226d81ae3095fa4916, server=3609ad07831c,39733,1734371789085 2024-12-16T17:57:43,887 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39733 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9edd584a765d2a226d81ae3095fa4916, server=3609ad07831c,39733,1734371789085 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-16T17:57:43,887 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39733 {}] ipc.CallRunner(138): callId: 126 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45486 deadline: 1734371923885, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9edd584a765d2a226d81ae3095fa4916, server=3609ad07831c,39733,1734371789085 2024-12-16T17:57:43,887 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39733 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9edd584a765d2a226d81ae3095fa4916, server=3609ad07831c,39733,1734371789085 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-16T17:57:43,888 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39733 {}] ipc.CallRunner(138): callId: 139 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45454 deadline: 1734371923885, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9edd584a765d2a226d81ae3095fa4916, server=3609ad07831c,39733,1734371789085 2024-12-16T17:57:43,981 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38367 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=73 2024-12-16T17:57:43,984 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 3609ad07831c,39733,1734371789085 2024-12-16T17:57:43,984 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=39733 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=74 2024-12-16T17:57:43,984 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-2 {event_type=RS_FLUSH_REGIONS, pid=74}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1734371848597.9edd584a765d2a226d81ae3095fa4916. 2024-12-16T17:57:43,984 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-2 {event_type=RS_FLUSH_REGIONS, pid=74}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1734371848597.9edd584a765d2a226d81ae3095fa4916. as already flushing 2024-12-16T17:57:43,985 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-2 {event_type=RS_FLUSH_REGIONS, pid=74}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1734371848597.9edd584a765d2a226d81ae3095fa4916. 2024-12-16T17:57:43,985 ERROR [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-2 {event_type=RS_FLUSH_REGIONS, pid=74}] handler.RSProcedureHandler(58): pid=74 java.io.IOException: Unable to complete flush {ENCODED => 9edd584a765d2a226d81ae3095fa4916, NAME => 'TestAcidGuarantees,,1734371848597.9edd584a765d2a226d81ae3095fa4916.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-16T17:57:43,985 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-2 {event_type=RS_FLUSH_REGIONS, pid=74}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=74 java.io.IOException: Unable to complete flush {ENCODED => 9edd584a765d2a226d81ae3095fa4916, NAME => 'TestAcidGuarantees,,1734371848597.9edd584a765d2a226d81ae3095fa4916.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-16T17:57:43,985 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38367 {}] master.HMaster(4114): Remote procedure failed, pid=74 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 9edd584a765d2a226d81ae3095fa4916, NAME => 'TestAcidGuarantees,,1734371848597.9edd584a765d2a226d81ae3095fa4916.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 9edd584a765d2a226d81ae3095fa4916, NAME => 'TestAcidGuarantees,,1734371848597.9edd584a765d2a226d81ae3095fa4916.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-16T17:57:43,993 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39733 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9edd584a765d2a226d81ae3095fa4916, server=3609ad07831c,39733,1734371789085 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-16T17:57:43,993 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39733 {}] ipc.CallRunner(138): callId: 54 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45446 deadline: 1734371923992, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9edd584a765d2a226d81ae3095fa4916, server=3609ad07831c,39733,1734371789085 2024-12-16T17:57:43,995 DEBUG [Thread-1167 {}] client.RpcRetryingCallerImpl(129): Call exception, tries=7, retries=16, started=8178 ms ago, cancelled=false, msg=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9edd584a765d2a226d81ae3095fa4916, server=3609ad07831c,39733,1734371789085 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) , details=row 'test_row_1' on table 'TestAcidGuarantees' at region=TestAcidGuarantees,,1734371848597.9edd584a765d2a226d81ae3095fa4916., hostname=3609ad07831c,39733,1734371789085, seqNum=2, see https://s.apache.org/timeout, 
exception=org.apache.hadoop.hbase.RegionTooBusyException: org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9edd584a765d2a226d81ae3095fa4916, server=3609ad07831c,39733,1734371789085 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at jdk.internal.reflect.GeneratedConstructorAccessor40.newInstance(Unknown Source) at java.base/jdk.internal.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45) at java.base/java.lang.reflect.Constructor.newInstanceWithCaller(Constructor.java:499) at java.base/java.lang.reflect.Constructor.newInstance(Constructor.java:480) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.instantiateException(RemoteWithExtrasException.java:110) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.unwrapRemoteException(RemoteWithExtrasException.java:100) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.makeIOExceptionOfException(ProtobufUtil.java:280) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.handleRemoteException(ProtobufUtil.java:265) at org.apache.hadoop.hbase.client.RegionServerCallable.call(RegionServerCallable.java:133) at org.apache.hadoop.hbase.client.RpcRetryingCallerImpl.callWithRetries(RpcRetryingCallerImpl.java:104) at org.apache.hadoop.hbase.client.HTable.lambda$put$3(HTable.java:578) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.client.HTable.put(HTable.java:565) at org.apache.hadoop.hbase.AcidGuaranteesTestTool$AtomicityWriter.doAnAction(AcidGuaranteesTestTool.java:169) at org.apache.hadoop.hbase.MultithreadedTestUtil$RepeatingTestThread.doWork(MultithreadedTestUtil.java:149) at org.apache.hadoop.hbase.MultithreadedTestUtil$TestThread.run(MultithreadedTestUtil.java:123) Caused by: org.apache.hadoop.hbase.ipc.RemoteWithExtrasException(org.apache.hadoop.hbase.RegionTooBusyException): org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9edd584a765d2a226d81ae3095fa4916, server=3609ad07831c,39733,1734371789085 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at 
org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.onCallFinished(AbstractRpcClient.java:392) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.access$100(AbstractRpcClient.java:94) at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:430) at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:425) at org.apache.hadoop.hbase.ipc.Call.callComplete(Call.java:116) at org.apache.hadoop.hbase.ipc.Call.setException(Call.java:131) at org.apache.hadoop.hbase.ipc.RpcConnection.readResponse(RpcConnection.java:457) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.readResponse(NettyRpcDuplexHandler.java:125) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.channelRead(NettyRpcDuplexHandler.java:140) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.fireChannelRead(ByteToMessageDecoder.java:346) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.channelRead(ByteToMessageDecoder.java:318) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:444) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.timeout.IdleStateHandler.channelRead(IdleStateHandler.java:289) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline$HeadContext.channelRead(DefaultChannelPipeline.java:1357) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:440) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline.fireChannelRead(DefaultChannelPipeline.java:868) at org.apache.hbase.thirdparty.io.netty.channel.nio.AbstractNioByteChannel$NioByteUnsafe.read(AbstractNioByteChannel.java:166) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKey(NioEventLoop.java:788) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeysOptimized(NioEventLoop.java:724) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeys(NioEventLoop.java:650) at 
org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:562) at org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) at org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) at org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) at java.base/java.lang.Thread.run(Thread.java:840) 2024-12-16T17:57:43,997 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39733 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9edd584a765d2a226d81ae3095fa4916, server=3609ad07831c,39733,1734371789085 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-16T17:57:43,998 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39733 {}] ipc.CallRunner(138): callId: 127 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45532 deadline: 1734371923997, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9edd584a765d2a226d81ae3095fa4916, server=3609ad07831c,39733,1734371789085 2024-12-16T17:57:43,998 DEBUG [Thread-1165 {}] client.RpcRetryingCallerImpl(129): Call exception, tries=7, retries=16, started=8183 ms ago, cancelled=false, msg=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9edd584a765d2a226d81ae3095fa4916, server=3609ad07831c,39733,1734371789085 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at 
org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) , details=row 'test_row_1' on table 'TestAcidGuarantees' at region=TestAcidGuarantees,,1734371848597.9edd584a765d2a226d81ae3095fa4916., hostname=3609ad07831c,39733,1734371789085, seqNum=2, see https://s.apache.org/timeout, exception=org.apache.hadoop.hbase.RegionTooBusyException: org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9edd584a765d2a226d81ae3095fa4916, server=3609ad07831c,39733,1734371789085 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at jdk.internal.reflect.GeneratedConstructorAccessor40.newInstance(Unknown Source) at java.base/jdk.internal.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45) at java.base/java.lang.reflect.Constructor.newInstanceWithCaller(Constructor.java:499) at java.base/java.lang.reflect.Constructor.newInstance(Constructor.java:480) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.instantiateException(RemoteWithExtrasException.java:110) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.unwrapRemoteException(RemoteWithExtrasException.java:100) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.makeIOExceptionOfException(ProtobufUtil.java:280) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.handleRemoteException(ProtobufUtil.java:265) at org.apache.hadoop.hbase.client.RegionServerCallable.call(RegionServerCallable.java:133) at org.apache.hadoop.hbase.client.RpcRetryingCallerImpl.callWithRetries(RpcRetryingCallerImpl.java:104) at org.apache.hadoop.hbase.client.HTable.lambda$put$3(HTable.java:578) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.client.HTable.put(HTable.java:565) at org.apache.hadoop.hbase.AcidGuaranteesTestTool$AtomicityWriter.doAnAction(AcidGuaranteesTestTool.java:169) at org.apache.hadoop.hbase.MultithreadedTestUtil$RepeatingTestThread.doWork(MultithreadedTestUtil.java:149) at org.apache.hadoop.hbase.MultithreadedTestUtil$TestThread.run(MultithreadedTestUtil.java:123) Caused by: org.apache.hadoop.hbase.ipc.RemoteWithExtrasException(org.apache.hadoop.hbase.RegionTooBusyException): org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9edd584a765d2a226d81ae3095fa4916, server=3609ad07831c,39733,1734371789085 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at 
org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.onCallFinished(AbstractRpcClient.java:392) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.access$100(AbstractRpcClient.java:94) at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:430) at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:425) at org.apache.hadoop.hbase.ipc.Call.callComplete(Call.java:116) at org.apache.hadoop.hbase.ipc.Call.setException(Call.java:131) at org.apache.hadoop.hbase.ipc.RpcConnection.readResponse(RpcConnection.java:457) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.readResponse(NettyRpcDuplexHandler.java:125) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.channelRead(NettyRpcDuplexHandler.java:140) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.fireChannelRead(ByteToMessageDecoder.java:346) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.channelRead(ByteToMessageDecoder.java:318) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:444) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.timeout.IdleStateHandler.channelRead(IdleStateHandler.java:289) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline$HeadContext.channelRead(DefaultChannelPipeline.java:1357) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:440) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline.fireChannelRead(DefaultChannelPipeline.java:868) at org.apache.hbase.thirdparty.io.netty.channel.nio.AbstractNioByteChannel$NioByteUnsafe.read(AbstractNioByteChannel.java:166) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKey(NioEventLoop.java:788) at 
org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeysOptimized(NioEventLoop.java:724) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeys(NioEventLoop.java:650) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:562) at org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) at org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) at org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) at java.base/java.lang.Thread.run(Thread.java:840) 2024-12-16T17:57:44,023 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=53.67 KB at sequenceid=312 (bloomFilter=true), to=hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/9edd584a765d2a226d81ae3095fa4916/.tmp/C/3b0ca63b404f441ba0092f577479dd1a 2024-12-16T17:57:44,029 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/9edd584a765d2a226d81ae3095fa4916/.tmp/A/e9748a47b8b54251beadee6f5240470f as hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/9edd584a765d2a226d81ae3095fa4916/A/e9748a47b8b54251beadee6f5240470f 2024-12-16T17:57:44,037 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/9edd584a765d2a226d81ae3095fa4916/A/e9748a47b8b54251beadee6f5240470f, entries=150, sequenceid=312, filesize=12.0 K 2024-12-16T17:57:44,039 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/9edd584a765d2a226d81ae3095fa4916/.tmp/B/f767d2bea540449ba3f4c73b747c58ab as hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/9edd584a765d2a226d81ae3095fa4916/B/f767d2bea540449ba3f4c73b747c58ab 2024-12-16T17:57:44,044 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/9edd584a765d2a226d81ae3095fa4916/B/f767d2bea540449ba3f4c73b747c58ab, entries=150, sequenceid=312, filesize=12.0 K 2024-12-16T17:57:44,045 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/9edd584a765d2a226d81ae3095fa4916/.tmp/C/3b0ca63b404f441ba0092f577479dd1a as hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/9edd584a765d2a226d81ae3095fa4916/C/3b0ca63b404f441ba0092f577479dd1a 2024-12-16T17:57:44,051 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/9edd584a765d2a226d81ae3095fa4916/C/3b0ca63b404f441ba0092f577479dd1a, entries=150, sequenceid=312, filesize=12.0 K 2024-12-16T17:57:44,052 INFO [MemStoreFlusher.0 {}] 
regionserver.HRegion(3040): Finished flush of dataSize ~161.02 KB/164880, heapSize ~422.58 KB/432720, currentSize=40.25 KB/41220 for 9edd584a765d2a226d81ae3095fa4916 in 480ms, sequenceid=312, compaction requested=true 2024-12-16T17:57:44,052 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 9edd584a765d2a226d81ae3095fa4916: 2024-12-16T17:57:44,052 DEBUG [RS:0;3609ad07831c:39733-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 4 store files, 0 compacting, 4 eligible, 16 blocking 2024-12-16T17:57:44,053 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 9edd584a765d2a226d81ae3095fa4916:A, priority=-2147483648, current under compaction store size is 1 2024-12-16T17:57:44,053 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-16T17:57:44,053 DEBUG [RS:0;3609ad07831c:39733-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 4 store files, 0 compacting, 4 eligible, 16 blocking 2024-12-16T17:57:44,053 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 9edd584a765d2a226d81ae3095fa4916:B, priority=-2147483648, current under compaction store size is 2 2024-12-16T17:57:44,053 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-16T17:57:44,053 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 9edd584a765d2a226d81ae3095fa4916:C, priority=-2147483648, current under compaction store size is 3 2024-12-16T17:57:44,053 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-16T17:57:44,054 DEBUG [RS:0;3609ad07831c:39733-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 4 files of size 47190 starting at candidate #0 after considering 3 permutations with 3 in ratio 2024-12-16T17:57:44,054 DEBUG [RS:0;3609ad07831c:39733-longCompactions-0 {}] regionserver.HStore(1540): 9edd584a765d2a226d81ae3095fa4916/B is initiating minor compaction (all files) 2024-12-16T17:57:44,054 INFO [RS:0;3609ad07831c:39733-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 9edd584a765d2a226d81ae3095fa4916/B in TestAcidGuarantees,,1734371848597.9edd584a765d2a226d81ae3095fa4916. 
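The flush that just finished left four store files in each of the A, B and C stores, so the ExploringCompactionPolicy selects all four eligible files for a minor compaction; the "16 blocking" figure is the per-store file count at which further writes are blocked. A minimal Java sketch of the configuration keys behind that selection follows; the values shown are common defaults and assumptions, not settings recovered from this run.

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;

    public class CompactionPolicySketch {
      public static void main(String[] args) {
        Configuration conf = HBaseConfiguration.create();
        // Minimum number of store files before a minor compaction is considered.
        conf.setInt("hbase.hstore.compaction.min", 3);
        // Upper bound on the number of files merged in a single minor compaction.
        conf.setInt("hbase.hstore.compaction.max", 10);
        // Writes to the region are blocked once a single store holds this many files
        // (the "16 blocking" figure in the selection log lines above).
        conf.setInt("hbase.hstore.blockingStoreFiles", 16);
        System.out.println("compaction.min = "
            + conf.getInt("hbase.hstore.compaction.min", 3));
      }
    }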
2024-12-16T17:57:44,054 INFO [RS:0;3609ad07831c:39733-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/9edd584a765d2a226d81ae3095fa4916/B/6bdc432834464006827d507e3ca887a6, hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/9edd584a765d2a226d81ae3095fa4916/B/8fbe6a980a704242825fab6bfa36f48b, hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/9edd584a765d2a226d81ae3095fa4916/B/76d6710c5f0146baa12348d2788626a8, hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/9edd584a765d2a226d81ae3095fa4916/B/f767d2bea540449ba3f4c73b747c58ab] into tmpdir=hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/9edd584a765d2a226d81ae3095fa4916/.tmp, totalSize=46.1 K 2024-12-16T17:57:44,055 DEBUG [RS:0;3609ad07831c:39733-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 4 files of size 47190 starting at candidate #0 after considering 3 permutations with 3 in ratio 2024-12-16T17:57:44,055 DEBUG [RS:0;3609ad07831c:39733-shortCompactions-0 {}] regionserver.HStore(1540): 9edd584a765d2a226d81ae3095fa4916/A is initiating minor compaction (all files) 2024-12-16T17:57:44,055 INFO [RS:0;3609ad07831c:39733-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 9edd584a765d2a226d81ae3095fa4916/A in TestAcidGuarantees,,1734371848597.9edd584a765d2a226d81ae3095fa4916. 2024-12-16T17:57:44,055 INFO [RS:0;3609ad07831c:39733-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/9edd584a765d2a226d81ae3095fa4916/A/45dc969387eb488b94a27699e9d0ec80, hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/9edd584a765d2a226d81ae3095fa4916/A/f854a9e208b941ccafc3f0cb3cad46f4, hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/9edd584a765d2a226d81ae3095fa4916/A/163f14959f6448d7b0df2252e5a9d2ea, hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/9edd584a765d2a226d81ae3095fa4916/A/e9748a47b8b54251beadee6f5240470f] into tmpdir=hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/9edd584a765d2a226d81ae3095fa4916/.tmp, totalSize=46.1 K 2024-12-16T17:57:44,056 DEBUG [RS:0;3609ad07831c:39733-longCompactions-0 {}] compactions.Compactor(224): Compacting 6bdc432834464006827d507e3ca887a6, keycount=150, bloomtype=ROW, size=12.4 K, encoding=NONE, compression=NONE, seqNum=245, earliestPutTs=1734371860602 2024-12-16T17:57:44,056 DEBUG [RS:0;3609ad07831c:39733-shortCompactions-0 {}] compactions.Compactor(224): Compacting 45dc969387eb488b94a27699e9d0ec80, keycount=150, bloomtype=ROW, size=12.4 K, encoding=NONE, compression=NONE, seqNum=245, earliestPutTs=1734371860602 2024-12-16T17:57:44,056 DEBUG [RS:0;3609ad07831c:39733-longCompactions-0 {}] compactions.Compactor(224): Compacting 8fbe6a980a704242825fab6bfa36f48b, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=274, 
earliestPutTs=1734371860942 2024-12-16T17:57:44,056 DEBUG [RS:0;3609ad07831c:39733-shortCompactions-0 {}] compactions.Compactor(224): Compacting f854a9e208b941ccafc3f0cb3cad46f4, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=274, earliestPutTs=1734371860942 2024-12-16T17:57:44,057 DEBUG [RS:0;3609ad07831c:39733-shortCompactions-0 {}] compactions.Compactor(224): Compacting 163f14959f6448d7b0df2252e5a9d2ea, keycount=100, bloomtype=ROW, size=9.6 K, encoding=NONE, compression=NONE, seqNum=285, earliestPutTs=1734371861255 2024-12-16T17:57:44,057 DEBUG [RS:0;3609ad07831c:39733-longCompactions-0 {}] compactions.Compactor(224): Compacting 76d6710c5f0146baa12348d2788626a8, keycount=100, bloomtype=ROW, size=9.6 K, encoding=NONE, compression=NONE, seqNum=285, earliestPutTs=1734371861255 2024-12-16T17:57:44,057 DEBUG [RS:0;3609ad07831c:39733-shortCompactions-0 {}] compactions.Compactor(224): Compacting e9748a47b8b54251beadee6f5240470f, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=312, earliestPutTs=1734371862453 2024-12-16T17:57:44,063 DEBUG [RS:0;3609ad07831c:39733-longCompactions-0 {}] compactions.Compactor(224): Compacting f767d2bea540449ba3f4c73b747c58ab, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=312, earliestPutTs=1734371862453 2024-12-16T17:57:44,078 INFO [RS:0;3609ad07831c:39733-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 9edd584a765d2a226d81ae3095fa4916#A#compaction#270 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-12-16T17:57:44,079 DEBUG [RS:0;3609ad07831c:39733-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/9edd584a765d2a226d81ae3095fa4916/.tmp/A/3b69673bc32244ac9bbc02d3e38837f9 is 50, key is test_row_0/A:col10/1734371862453/Put/seqid=0 2024-12-16T17:57:44,085 INFO [RS:0;3609ad07831c:39733-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 9edd584a765d2a226d81ae3095fa4916#B#compaction#271 average throughput is 1.09 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-12-16T17:57:44,086 DEBUG [RS:0;3609ad07831c:39733-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/9edd584a765d2a226d81ae3095fa4916/.tmp/B/7df85e88d0f7442284e56ab807a250d2 is 50, key is test_row_0/B:col10/1734371862453/Put/seqid=0 2024-12-16T17:57:44,107 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41817 is added to blk_1073742141_1317 (size=13017) 2024-12-16T17:57:44,112 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41817 is added to blk_1073742142_1318 (size=13017) 2024-12-16T17:57:44,114 DEBUG [RS:0;3609ad07831c:39733-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/9edd584a765d2a226d81ae3095fa4916/.tmp/A/3b69673bc32244ac9bbc02d3e38837f9 as hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/9edd584a765d2a226d81ae3095fa4916/A/3b69673bc32244ac9bbc02d3e38837f9 2024-12-16T17:57:44,120 DEBUG [RS:0;3609ad07831c:39733-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/9edd584a765d2a226d81ae3095fa4916/.tmp/B/7df85e88d0f7442284e56ab807a250d2 as hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/9edd584a765d2a226d81ae3095fa4916/B/7df85e88d0f7442284e56ab807a250d2 2024-12-16T17:57:44,122 INFO [RS:0;3609ad07831c:39733-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 4 (all) file(s) in 9edd584a765d2a226d81ae3095fa4916/A of 9edd584a765d2a226d81ae3095fa4916 into 3b69673bc32244ac9bbc02d3e38837f9(size=12.7 K), total size for store is 12.7 K. This selection was in queue for 0sec, and took 0sec to execute. 
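At this point the A and B stores have each been compacted from four files down to a single ~12.7 K HFile. Flushes and compactions like these can also be requested explicitly through the Admin API; the sketch below shows such a request against the TestAcidGuarantees table named in the log, assuming a reachable cluster configured via an hbase-site.xml on the classpath.

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;

    public class FlushAndCompactSketch {
      public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();
        try (Connection connection = ConnectionFactory.createConnection(conf);
             Admin admin = connection.getAdmin()) {
          TableName table = TableName.valueOf("TestAcidGuarantees");
          // Ask the region servers to flush the table's memstores to HFiles.
          admin.flush(table);
          // Request a compaction; the request is queued and executed by the
          // region server's compaction threads, as seen in the log above.
          admin.compact(table);
        }
      }
    }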
2024-12-16T17:57:44,122 DEBUG [RS:0;3609ad07831c:39733-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 9edd584a765d2a226d81ae3095fa4916: 2024-12-16T17:57:44,122 INFO [RS:0;3609ad07831c:39733-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1734371848597.9edd584a765d2a226d81ae3095fa4916., storeName=9edd584a765d2a226d81ae3095fa4916/A, priority=12, startTime=1734371864052; duration=0sec 2024-12-16T17:57:44,122 DEBUG [RS:0;3609ad07831c:39733-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-16T17:57:44,122 DEBUG [RS:0;3609ad07831c:39733-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 9edd584a765d2a226d81ae3095fa4916:A 2024-12-16T17:57:44,122 DEBUG [RS:0;3609ad07831c:39733-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 4 store files, 0 compacting, 4 eligible, 16 blocking 2024-12-16T17:57:44,125 DEBUG [RS:0;3609ad07831c:39733-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 4 files of size 47190 starting at candidate #0 after considering 3 permutations with 3 in ratio 2024-12-16T17:57:44,125 DEBUG [RS:0;3609ad07831c:39733-shortCompactions-0 {}] regionserver.HStore(1540): 9edd584a765d2a226d81ae3095fa4916/C is initiating minor compaction (all files) 2024-12-16T17:57:44,125 INFO [RS:0;3609ad07831c:39733-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 9edd584a765d2a226d81ae3095fa4916/C in TestAcidGuarantees,,1734371848597.9edd584a765d2a226d81ae3095fa4916. 2024-12-16T17:57:44,125 INFO [RS:0;3609ad07831c:39733-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/9edd584a765d2a226d81ae3095fa4916/C/fde1ef3e633045e8be14cc3d0e2c6196, hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/9edd584a765d2a226d81ae3095fa4916/C/6b41f05d800749ac8b0f822120c2bc6f, hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/9edd584a765d2a226d81ae3095fa4916/C/aec5289a7b48477f9919f0da3e6523db, hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/9edd584a765d2a226d81ae3095fa4916/C/3b0ca63b404f441ba0092f577479dd1a] into tmpdir=hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/9edd584a765d2a226d81ae3095fa4916/.tmp, totalSize=46.1 K 2024-12-16T17:57:44,126 DEBUG [RS:0;3609ad07831c:39733-shortCompactions-0 {}] compactions.Compactor(224): Compacting fde1ef3e633045e8be14cc3d0e2c6196, keycount=150, bloomtype=ROW, size=12.4 K, encoding=NONE, compression=NONE, seqNum=245, earliestPutTs=1734371860602 2024-12-16T17:57:44,126 DEBUG [RS:0;3609ad07831c:39733-shortCompactions-0 {}] compactions.Compactor(224): Compacting 6b41f05d800749ac8b0f822120c2bc6f, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=274, earliestPutTs=1734371860942 2024-12-16T17:57:44,127 DEBUG [RS:0;3609ad07831c:39733-shortCompactions-0 {}] compactions.Compactor(224): Compacting aec5289a7b48477f9919f0da3e6523db, keycount=100, bloomtype=ROW, size=9.6 K, 
encoding=NONE, compression=NONE, seqNum=285, earliestPutTs=1734371861255 2024-12-16T17:57:44,129 INFO [RS:0;3609ad07831c:39733-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 4 (all) file(s) in 9edd584a765d2a226d81ae3095fa4916/B of 9edd584a765d2a226d81ae3095fa4916 into 7df85e88d0f7442284e56ab807a250d2(size=12.7 K), total size for store is 12.7 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-12-16T17:57:44,129 DEBUG [RS:0;3609ad07831c:39733-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 9edd584a765d2a226d81ae3095fa4916: 2024-12-16T17:57:44,129 INFO [RS:0;3609ad07831c:39733-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1734371848597.9edd584a765d2a226d81ae3095fa4916., storeName=9edd584a765d2a226d81ae3095fa4916/B, priority=12, startTime=1734371864053; duration=0sec 2024-12-16T17:57:44,129 DEBUG [RS:0;3609ad07831c:39733-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-16T17:57:44,129 DEBUG [RS:0;3609ad07831c:39733-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 9edd584a765d2a226d81ae3095fa4916:B 2024-12-16T17:57:44,129 DEBUG [RS:0;3609ad07831c:39733-shortCompactions-0 {}] compactions.Compactor(224): Compacting 3b0ca63b404f441ba0092f577479dd1a, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=312, earliestPutTs=1734371862453 2024-12-16T17:57:44,141 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 3609ad07831c,39733,1734371789085 2024-12-16T17:57:44,141 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=39733 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=74 2024-12-16T17:57:44,141 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-0 {event_type=RS_FLUSH_REGIONS, pid=74}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1734371848597.9edd584a765d2a226d81ae3095fa4916. 
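While compaction of store C is still being set up, the master re-dispatches the flush procedure (pid=74) to the region server, and the writer threads keep retrying their puts through RpcRetryingCallerImpl. The sketch below imitates that behaviour from the client side: it tunes the built-in retry settings and adds an explicit backoff around a put to the row, family and qualifier names that appear in the log (test_row_1, A, col10). The manual retry loop and its backoff values are illustrative assumptions; the HBase client already retries internally.

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.RegionTooBusyException;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;
    import org.apache.hadoop.hbase.client.Put;
    import org.apache.hadoop.hbase.client.Table;
    import org.apache.hadoop.hbase.util.Bytes;

    public class BusyRegionWriteSketch {
      public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();
        // The built-in retrying caller is tuned with these keys; values are illustrative.
        conf.setInt("hbase.client.retries.number", 16);
        conf.setLong("hbase.client.pause", 100);
        try (Connection connection = ConnectionFactory.createConnection(conf);
             Table table = connection.getTable(TableName.valueOf("TestAcidGuarantees"))) {
          Put put = new Put(Bytes.toBytes("test_row_1"));
          put.addColumn(Bytes.toBytes("A"), Bytes.toBytes("col10"), Bytes.toBytes("value"));
          for (int attempt = 1; ; attempt++) {
            try {
              table.put(put);               // may surface RegionTooBusyException after retries
              break;
            } catch (RegionTooBusyException e) {
              if (attempt >= 5) throw e;    // give up after a few extra attempts
              Thread.sleep(200L * attempt); // simple linear backoff before retrying
            }
          }
        }
      }
    }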
2024-12-16T17:57:44,142 INFO [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-0 {event_type=RS_FLUSH_REGIONS, pid=74}] regionserver.HRegion(2837): Flushing 9edd584a765d2a226d81ae3095fa4916 3/3 column families, dataSize=40.25 KB heapSize=106.22 KB 2024-12-16T17:57:44,142 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-0 {event_type=RS_FLUSH_REGIONS, pid=74}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 9edd584a765d2a226d81ae3095fa4916, store=A 2024-12-16T17:57:44,142 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-0 {event_type=RS_FLUSH_REGIONS, pid=74}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-16T17:57:44,142 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-0 {event_type=RS_FLUSH_REGIONS, pid=74}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 9edd584a765d2a226d81ae3095fa4916, store=B 2024-12-16T17:57:44,142 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-0 {event_type=RS_FLUSH_REGIONS, pid=74}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-16T17:57:44,142 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-0 {event_type=RS_FLUSH_REGIONS, pid=74}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 9edd584a765d2a226d81ae3095fa4916, store=C 2024-12-16T17:57:44,142 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-0 {event_type=RS_FLUSH_REGIONS, pid=74}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-16T17:57:44,147 INFO [RS:0;3609ad07831c:39733-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 9edd584a765d2a226d81ae3095fa4916#C#compaction#272 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-12-16T17:57:44,147 DEBUG [RS:0;3609ad07831c:39733-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/9edd584a765d2a226d81ae3095fa4916/.tmp/C/35bfb5128a414adc9736480f8dd7b762 is 50, key is test_row_0/C:col10/1734371862453/Put/seqid=0 2024-12-16T17:57:44,152 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-0 {event_type=RS_FLUSH_REGIONS, pid=74}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/9edd584a765d2a226d81ae3095fa4916/.tmp/A/7b27faf4cdd640a6a30ef945d439bfe1 is 50, key is test_row_0/A:col10/1734371863578/Put/seqid=0 2024-12-16T17:57:44,162 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41817 is added to blk_1073742143_1319 (size=13017) 2024-12-16T17:57:44,176 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41817 is added to blk_1073742144_1320 (size=12301) 2024-12-16T17:57:44,176 INFO [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-0 {event_type=RS_FLUSH_REGIONS, pid=74}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=13.42 KB at sequenceid=323 (bloomFilter=true), to=hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/9edd584a765d2a226d81ae3095fa4916/.tmp/A/7b27faf4cdd640a6a30ef945d439bfe1 2024-12-16T17:57:44,191 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-0 {event_type=RS_FLUSH_REGIONS, pid=74}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/9edd584a765d2a226d81ae3095fa4916/.tmp/B/844b3937a1c543009d17f7f1f7369509 is 50, key is test_row_0/B:col10/1734371863578/Put/seqid=0 2024-12-16T17:57:44,192 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39733 {}] regionserver.HRegion(8581): Flush requested on 9edd584a765d2a226d81ae3095fa4916 2024-12-16T17:57:44,192 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1734371848597.9edd584a765d2a226d81ae3095fa4916. as already flushing 2024-12-16T17:57:44,213 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41817 is added to blk_1073742145_1321 (size=12301) 2024-12-16T17:57:44,253 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39733 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9edd584a765d2a226d81ae3095fa4916, server=3609ad07831c,39733,1734371789085 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-16T17:57:44,253 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39733 {}] ipc.CallRunner(138): callId: 137 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45484 deadline: 1734371924251, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9edd584a765d2a226d81ae3095fa4916, server=3609ad07831c,39733,1734371789085 2024-12-16T17:57:44,253 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39733 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9edd584a765d2a226d81ae3095fa4916, server=3609ad07831c,39733,1734371789085 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-16T17:57:44,254 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39733 {}] ipc.CallRunner(138): callId: 135 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45486 deadline: 1734371924251, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9edd584a765d2a226d81ae3095fa4916, server=3609ad07831c,39733,1734371789085 2024-12-16T17:57:44,254 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39733 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9edd584a765d2a226d81ae3095fa4916, server=3609ad07831c,39733,1734371789085 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-16T17:57:44,254 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39733 {}] ipc.CallRunner(138): callId: 150 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45454 deadline: 1734371924252, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9edd584a765d2a226d81ae3095fa4916, server=3609ad07831c,39733,1734371789085 2024-12-16T17:57:44,281 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38367 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=73 2024-12-16T17:57:44,355 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39733 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9edd584a765d2a226d81ae3095fa4916, server=3609ad07831c,39733,1734371789085 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-16T17:57:44,355 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39733 {}] ipc.CallRunner(138): callId: 137 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45486 deadline: 1734371924355, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9edd584a765d2a226d81ae3095fa4916, server=3609ad07831c,39733,1734371789085 2024-12-16T17:57:44,356 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39733 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9edd584a765d2a226d81ae3095fa4916, server=3609ad07831c,39733,1734371789085 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-16T17:57:44,356 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39733 {}] ipc.CallRunner(138): callId: 139 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45484 deadline: 1734371924355, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9edd584a765d2a226d81ae3095fa4916, server=3609ad07831c,39733,1734371789085 2024-12-16T17:57:44,357 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39733 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9edd584a765d2a226d81ae3095fa4916, server=3609ad07831c,39733,1734371789085 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-16T17:57:44,357 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39733 {}] ipc.CallRunner(138): callId: 152 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45454 deadline: 1734371924355, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9edd584a765d2a226d81ae3095fa4916, server=3609ad07831c,39733,1734371789085 2024-12-16T17:57:44,557 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39733 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9edd584a765d2a226d81ae3095fa4916, server=3609ad07831c,39733,1734371789085 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-16T17:57:44,557 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39733 {}] ipc.CallRunner(138): callId: 139 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45486 deadline: 1734371924556, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9edd584a765d2a226d81ae3095fa4916, server=3609ad07831c,39733,1734371789085 2024-12-16T17:57:44,560 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39733 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9edd584a765d2a226d81ae3095fa4916, server=3609ad07831c,39733,1734371789085 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-16T17:57:44,561 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39733 {}] ipc.CallRunner(138): callId: 154 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45454 deadline: 1734371924558, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9edd584a765d2a226d81ae3095fa4916, server=3609ad07831c,39733,1734371789085 2024-12-16T17:57:44,561 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39733 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9edd584a765d2a226d81ae3095fa4916, server=3609ad07831c,39733,1734371789085 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-16T17:57:44,561 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39733 {}] ipc.CallRunner(138): callId: 141 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45484 deadline: 1734371924558, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9edd584a765d2a226d81ae3095fa4916, server=3609ad07831c,39733,1734371789085 2024-12-16T17:57:44,568 DEBUG [RS:0;3609ad07831c:39733-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/9edd584a765d2a226d81ae3095fa4916/.tmp/C/35bfb5128a414adc9736480f8dd7b762 as hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/9edd584a765d2a226d81ae3095fa4916/C/35bfb5128a414adc9736480f8dd7b762 2024-12-16T17:57:44,577 INFO [RS:0;3609ad07831c:39733-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 4 (all) file(s) in 9edd584a765d2a226d81ae3095fa4916/C of 9edd584a765d2a226d81ae3095fa4916 into 35bfb5128a414adc9736480f8dd7b762(size=12.7 K), total size for store is 12.7 K. This selection was in queue for 0sec, and took 0sec to execute. 
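The run of RegionTooBusyException warnings above shows HRegion.checkResources rejecting Mutate calls once the region's memstore passes its 512.0 K blocking limit while the flush and compaction threads catch up. The sketch below is only an illustration of how that rejection can surface to a writer and be retried with backoff; it assumes the standard HBase client API, borrows the table, row, and column names from the log, and uses arbitrary backoff values (the stock client also performs its own internal retries before the exception reaches application code).

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.RegionTooBusyException;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;
    import org.apache.hadoop.hbase.client.Put;
    import org.apache.hadoop.hbase.client.Table;
    import org.apache.hadoop.hbase.util.Bytes;

    public class BusyRegionRetryExample {
      public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();
        try (Connection conn = ConnectionFactory.createConnection(conf);
             Table table = conn.getTable(TableName.valueOf("TestAcidGuarantees"))) {
          Put put = new Put(Bytes.toBytes("test_row_0"));
          put.addColumn(Bytes.toBytes("A"), Bytes.toBytes("col10"), Bytes.toBytes("value"));
          long backoffMs = 100;
          for (int attempt = 0; attempt < 10; attempt++) {
            try {
              table.put(put);            // rejected with RegionTooBusyException while the
              break;                     // memstore is over its blocking limit
            } catch (RegionTooBusyException e) {
              Thread.sleep(backoffMs);   // give the in-flight flush time to drain the memstore
              backoffMs = Math.min(backoffMs * 2, 5_000);
            }
          }
        }
      }
    }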
2024-12-16T17:57:44,577 DEBUG [RS:0;3609ad07831c:39733-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 9edd584a765d2a226d81ae3095fa4916: 2024-12-16T17:57:44,577 INFO [RS:0;3609ad07831c:39733-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1734371848597.9edd584a765d2a226d81ae3095fa4916., storeName=9edd584a765d2a226d81ae3095fa4916/C, priority=12, startTime=1734371864053; duration=0sec 2024-12-16T17:57:44,577 DEBUG [RS:0;3609ad07831c:39733-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-16T17:57:44,577 DEBUG [RS:0;3609ad07831c:39733-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 9edd584a765d2a226d81ae3095fa4916:C 2024-12-16T17:57:44,614 INFO [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-0 {event_type=RS_FLUSH_REGIONS, pid=74}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=13.42 KB at sequenceid=323 (bloomFilter=true), to=hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/9edd584a765d2a226d81ae3095fa4916/.tmp/B/844b3937a1c543009d17f7f1f7369509 2024-12-16T17:57:44,634 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-0 {event_type=RS_FLUSH_REGIONS, pid=74}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/9edd584a765d2a226d81ae3095fa4916/.tmp/C/739d7f17b6ec46b0bd0022544ba0a117 is 50, key is test_row_0/C:col10/1734371863578/Put/seqid=0 2024-12-16T17:57:44,673 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41817 is added to blk_1073742146_1322 (size=12301) 2024-12-16T17:57:44,782 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38367 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=73 2024-12-16T17:57:44,860 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39733 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9edd584a765d2a226d81ae3095fa4916, server=3609ad07831c,39733,1734371789085 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-16T17:57:44,861 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39733 {}] ipc.CallRunner(138): callId: 141 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45486 deadline: 1734371924859, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9edd584a765d2a226d81ae3095fa4916, server=3609ad07831c,39733,1734371789085 2024-12-16T17:57:44,865 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39733 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9edd584a765d2a226d81ae3095fa4916, server=3609ad07831c,39733,1734371789085 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-16T17:57:44,865 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39733 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9edd584a765d2a226d81ae3095fa4916, server=3609ad07831c,39733,1734371789085 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-16T17:57:44,865 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39733 {}] ipc.CallRunner(138): callId: 143 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45484 deadline: 1734371924864, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9edd584a765d2a226d81ae3095fa4916, server=3609ad07831c,39733,1734371789085 2024-12-16T17:57:44,865 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39733 {}] ipc.CallRunner(138): callId: 156 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45454 deadline: 1734371924864, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9edd584a765d2a226d81ae3095fa4916, server=3609ad07831c,39733,1734371789085 2024-12-16T17:57:45,074 INFO [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-0 {event_type=RS_FLUSH_REGIONS, pid=74}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=13.42 KB at sequenceid=323 (bloomFilter=true), to=hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/9edd584a765d2a226d81ae3095fa4916/.tmp/C/739d7f17b6ec46b0bd0022544ba0a117 2024-12-16T17:57:45,079 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-0 {event_type=RS_FLUSH_REGIONS, pid=74}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/9edd584a765d2a226d81ae3095fa4916/.tmp/A/7b27faf4cdd640a6a30ef945d439bfe1 as hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/9edd584a765d2a226d81ae3095fa4916/A/7b27faf4cdd640a6a30ef945d439bfe1 2024-12-16T17:57:45,084 INFO [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-0 {event_type=RS_FLUSH_REGIONS, pid=74}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/9edd584a765d2a226d81ae3095fa4916/A/7b27faf4cdd640a6a30ef945d439bfe1, entries=150, sequenceid=323, filesize=12.0 K 2024-12-16T17:57:45,088 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-0 {event_type=RS_FLUSH_REGIONS, pid=74}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/9edd584a765d2a226d81ae3095fa4916/.tmp/B/844b3937a1c543009d17f7f1f7369509 as hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/9edd584a765d2a226d81ae3095fa4916/B/844b3937a1c543009d17f7f1f7369509 2024-12-16T17:57:45,095 INFO [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-0 {event_type=RS_FLUSH_REGIONS, pid=74}] regionserver.HStore$StoreFlusherImpl(1989): Added 
hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/9edd584a765d2a226d81ae3095fa4916/B/844b3937a1c543009d17f7f1f7369509, entries=150, sequenceid=323, filesize=12.0 K 2024-12-16T17:57:45,096 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-0 {event_type=RS_FLUSH_REGIONS, pid=74}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/9edd584a765d2a226d81ae3095fa4916/.tmp/C/739d7f17b6ec46b0bd0022544ba0a117 as hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/9edd584a765d2a226d81ae3095fa4916/C/739d7f17b6ec46b0bd0022544ba0a117 2024-12-16T17:57:45,104 INFO [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-0 {event_type=RS_FLUSH_REGIONS, pid=74}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/9edd584a765d2a226d81ae3095fa4916/C/739d7f17b6ec46b0bd0022544ba0a117, entries=150, sequenceid=323, filesize=12.0 K 2024-12-16T17:57:45,105 INFO [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-0 {event_type=RS_FLUSH_REGIONS, pid=74}] regionserver.HRegion(3040): Finished flush of dataSize ~40.25 KB/41220, heapSize ~106.17 KB/108720, currentSize=167.72 KB/171750 for 9edd584a765d2a226d81ae3095fa4916 in 963ms, sequenceid=323, compaction requested=false 2024-12-16T17:57:45,105 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-0 {event_type=RS_FLUSH_REGIONS, pid=74}] regionserver.HRegion(2538): Flush status journal for 9edd584a765d2a226d81ae3095fa4916: 2024-12-16T17:57:45,105 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-0 {event_type=RS_FLUSH_REGIONS, pid=74}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1734371848597.9edd584a765d2a226d81ae3095fa4916. 
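The entries above close out the region flush at sequenceid=323 (~40.25 KB across stores A, B, and C in 963ms), which runs as FlushRegionProcedure pid=74 under the table-level flush pid=73 that the master handlers keep polling for ("Checking to see if procedure is done pid=73"). Such a table flush is typically requested through the Admin API; the snippet below is a minimal sketch with illustrative connection setup, not the test's own code.

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;

    public class FlushTableExample {
      public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();
        try (Connection conn = ConnectionFactory.createConnection(conf);
             Admin admin = conn.getAdmin()) {
          // Submits a table flush procedure on the master (procId 73/75 in the log)
          // and waits until the master reports the procedure done.
          admin.flush(TableName.valueOf("TestAcidGuarantees"));
        }
      }
    }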
2024-12-16T17:57:45,105 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-0 {event_type=RS_FLUSH_REGIONS, pid=74}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=74 2024-12-16T17:57:45,106 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38367 {}] master.HMaster(4106): Remote procedure done, pid=74 2024-12-16T17:57:45,109 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=74, resume processing ppid=73 2024-12-16T17:57:45,109 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=74, ppid=73, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 1.4280 sec 2024-12-16T17:57:45,110 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=73, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=73, table=TestAcidGuarantees in 1.4320 sec 2024-12-16T17:57:45,367 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39733 {}] regionserver.HRegion(8581): Flush requested on 9edd584a765d2a226d81ae3095fa4916 2024-12-16T17:57:45,367 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 9edd584a765d2a226d81ae3095fa4916 3/3 column families, dataSize=174.43 KB heapSize=457.78 KB 2024-12-16T17:57:45,367 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 9edd584a765d2a226d81ae3095fa4916, store=A 2024-12-16T17:57:45,367 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-16T17:57:45,367 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 9edd584a765d2a226d81ae3095fa4916, store=B 2024-12-16T17:57:45,367 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-16T17:57:45,367 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 9edd584a765d2a226d81ae3095fa4916, store=C 2024-12-16T17:57:45,367 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-16T17:57:45,372 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/9edd584a765d2a226d81ae3095fa4916/.tmp/A/d987114fae5148eba8c369b0d9e31847 is 50, key is test_row_0/A:col10/1734371864199/Put/seqid=0 2024-12-16T17:57:45,377 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39733 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9edd584a765d2a226d81ae3095fa4916, server=3609ad07831c,39733,1734371789085 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-16T17:57:45,377 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39733 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9edd584a765d2a226d81ae3095fa4916, server=3609ad07831c,39733,1734371789085 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-16T17:57:45,377 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39733 {}] ipc.CallRunner(138): callId: 147 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45486 deadline: 1734371925375, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9edd584a765d2a226d81ae3095fa4916, server=3609ad07831c,39733,1734371789085 2024-12-16T17:57:45,377 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39733 {}] ipc.CallRunner(138): callId: 158 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45454 deadline: 1734371925376, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9edd584a765d2a226d81ae3095fa4916, server=3609ad07831c,39733,1734371789085 2024-12-16T17:57:45,379 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39733 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9edd584a765d2a226d81ae3095fa4916, server=3609ad07831c,39733,1734371789085 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-16T17:57:45,379 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39733 {}] ipc.CallRunner(138): callId: 146 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45484 deadline: 1734371925377, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9edd584a765d2a226d81ae3095fa4916, server=3609ad07831c,39733,1734371789085 2024-12-16T17:57:45,382 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41817 is added to blk_1073742147_1323 (size=12301) 2024-12-16T17:57:45,383 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=58.14 KB at sequenceid=353 (bloomFilter=true), to=hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/9edd584a765d2a226d81ae3095fa4916/.tmp/A/d987114fae5148eba8c369b0d9e31847 2024-12-16T17:57:45,394 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/9edd584a765d2a226d81ae3095fa4916/.tmp/B/482cf643da9b4f0eaf5cd44fb2bf9aa7 is 50, key is test_row_0/B:col10/1734371864199/Put/seqid=0 2024-12-16T17:57:45,403 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41817 is added to blk_1073742148_1324 (size=12301) 2024-12-16T17:57:45,480 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39733 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9edd584a765d2a226d81ae3095fa4916, server=3609ad07831c,39733,1734371789085 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-16T17:57:45,480 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39733 {}] ipc.CallRunner(138): callId: 149 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45486 deadline: 1734371925478, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9edd584a765d2a226d81ae3095fa4916, server=3609ad07831c,39733,1734371789085 2024-12-16T17:57:45,481 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39733 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9edd584a765d2a226d81ae3095fa4916, server=3609ad07831c,39733,1734371789085 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-16T17:57:45,481 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39733 {}] ipc.CallRunner(138): callId: 148 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45484 deadline: 1734371925480, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9edd584a765d2a226d81ae3095fa4916, server=3609ad07831c,39733,1734371789085 2024-12-16T17:57:45,683 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39733 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9edd584a765d2a226d81ae3095fa4916, server=3609ad07831c,39733,1734371789085 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-16T17:57:45,684 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39733 {}] ipc.CallRunner(138): callId: 151 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45486 deadline: 1734371925682, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9edd584a765d2a226d81ae3095fa4916, server=3609ad07831c,39733,1734371789085 2024-12-16T17:57:45,684 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39733 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9edd584a765d2a226d81ae3095fa4916, server=3609ad07831c,39733,1734371789085 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-16T17:57:45,684 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39733 {}] ipc.CallRunner(138): callId: 150 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45484 deadline: 1734371925682, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9edd584a765d2a226d81ae3095fa4916, server=3609ad07831c,39733,1734371789085 2024-12-16T17:57:45,783 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38367 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=73 2024-12-16T17:57:45,783 INFO [Thread-1171 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 73 completed 2024-12-16T17:57:45,784 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38367 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-12-16T17:57:45,785 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38367 {}] procedure2.ProcedureExecutor(1098): Stored pid=75, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=75, table=TestAcidGuarantees 2024-12-16T17:57:45,786 INFO [PEWorker-1 {}] procedure.FlushTableProcedure(91): pid=75, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=75, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-12-16T17:57:45,787 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38367 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=75 2024-12-16T17:57:45,787 INFO [PEWorker-1 {}] procedure.FlushTableProcedure(91): pid=75, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=75, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-12-16T17:57:45,787 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=76, ppid=75, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-12-16T17:57:45,803 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=58.14 KB at sequenceid=353 (bloomFilter=true), to=hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/9edd584a765d2a226d81ae3095fa4916/.tmp/B/482cf643da9b4f0eaf5cd44fb2bf9aa7 2024-12-16T17:57:45,810 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/9edd584a765d2a226d81ae3095fa4916/.tmp/C/26fbdc55e7f7400c90eda639d12f6796 is 50, key is test_row_0/C:col10/1734371864199/Put/seqid=0 2024-12-16T17:57:45,826 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41817 is added to blk_1073742149_1325 (size=12301) 
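In these rejections the 512.0 K figure is the region's memstore blocking threshold, which in a stock deployment is hbase.hregion.memstore.flush.size multiplied by hbase.hregion.memstore.block.multiplier; the unusually small value indicates the test runs with a deliberately tiny flush size so the limit is hit quickly under concurrent writers. The snippet below is a hypothetical configuration sketch only: the property names are the standard ones, but the values are not taken from the test and were chosen merely so their product matches the 512 K seen in the log.

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;

    public class MemstoreBlockingLimitSketch {
      public static void main(String[] args) {
        Configuration conf = HBaseConfiguration.create();
        conf.setLong("hbase.hregion.memstore.flush.size", 128 * 1024); // 128 K flush trigger (illustrative)
        conf.setInt("hbase.hregion.memstore.block.multiplier", 4);     // block writes at 4 x flush size
        long blockingLimit = conf.getLong("hbase.hregion.memstore.flush.size", 0L)
            * conf.getInt("hbase.hregion.memstore.block.multiplier", 1);
        // 128 K * 4 = 512 K, the "Over memstore limit" threshold reported above.
        System.out.println("Writes block once a region's memstore exceeds " + blockingLimit + " bytes");
      }
    }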
2024-12-16T17:57:45,887 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38367 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=75 2024-12-16T17:57:45,939 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 3609ad07831c,39733,1734371789085 2024-12-16T17:57:45,939 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=39733 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=76 2024-12-16T17:57:45,939 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-1 {event_type=RS_FLUSH_REGIONS, pid=76}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1734371848597.9edd584a765d2a226d81ae3095fa4916. 2024-12-16T17:57:45,939 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-1 {event_type=RS_FLUSH_REGIONS, pid=76}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1734371848597.9edd584a765d2a226d81ae3095fa4916. as already flushing 2024-12-16T17:57:45,939 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-1 {event_type=RS_FLUSH_REGIONS, pid=76}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1734371848597.9edd584a765d2a226d81ae3095fa4916. 2024-12-16T17:57:45,939 ERROR [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-1 {event_type=RS_FLUSH_REGIONS, pid=76}] handler.RSProcedureHandler(58): pid=76 java.io.IOException: Unable to complete flush {ENCODED => 9edd584a765d2a226d81ae3095fa4916, NAME => 'TestAcidGuarantees,,1734371848597.9edd584a765d2a226d81ae3095fa4916.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-16T17:57:45,940 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-1 {event_type=RS_FLUSH_REGIONS, pid=76}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=76 java.io.IOException: Unable to complete flush {ENCODED => 9edd584a765d2a226d81ae3095fa4916, NAME => 'TestAcidGuarantees,,1734371848597.9edd584a765d2a226d81ae3095fa4916.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] 
at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-16T17:57:45,940 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38367 {}] master.HMaster(4114): Remote procedure failed, pid=76 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 9edd584a765d2a226d81ae3095fa4916, NAME => 'TestAcidGuarantees,,1734371848597.9edd584a765d2a226d81ae3095fa4916.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 9edd584a765d2a226d81ae3095fa4916, NAME => 'TestAcidGuarantees,,1734371848597.9edd584a765d2a226d81ae3095fa4916.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-16T17:57:45,943 WARN [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=38367 {}] assignment.AssignmentManager(1526): Unable to acquire lock for regionNode state=OPEN, location=3609ad07831c,39733,1734371789085, table=TestAcidGuarantees, region=9edd584a765d2a226d81ae3095fa4916. It is likely that another thread is currently holding the lock. To avoid deadlock, skip execution for now. 2024-12-16T17:57:45,987 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39733 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9edd584a765d2a226d81ae3095fa4916, server=3609ad07831c,39733,1734371789085 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-16T17:57:45,988 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39733 {}] ipc.CallRunner(138): callId: 153 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45486 deadline: 1734371925986, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9edd584a765d2a226d81ae3095fa4916, server=3609ad07831c,39733,1734371789085 2024-12-16T17:57:45,989 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39733 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9edd584a765d2a226d81ae3095fa4916, server=3609ad07831c,39733,1734371789085 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-16T17:57:45,990 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39733 {}] ipc.CallRunner(138): callId: 152 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45484 deadline: 1734371925988, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9edd584a765d2a226d81ae3095fa4916, server=3609ad07831c,39733,1734371789085 2024-12-16T17:57:46,088 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38367 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=75 2024-12-16T17:57:46,093 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 3609ad07831c,39733,1734371789085 2024-12-16T17:57:46,094 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=39733 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=76 2024-12-16T17:57:46,094 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-2 {event_type=RS_FLUSH_REGIONS, pid=76}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1734371848597.9edd584a765d2a226d81ae3095fa4916. 2024-12-16T17:57:46,094 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-2 {event_type=RS_FLUSH_REGIONS, pid=76}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1734371848597.9edd584a765d2a226d81ae3095fa4916. as already flushing 2024-12-16T17:57:46,094 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-2 {event_type=RS_FLUSH_REGIONS, pid=76}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1734371848597.9edd584a765d2a226d81ae3095fa4916. 2024-12-16T17:57:46,094 ERROR [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-2 {event_type=RS_FLUSH_REGIONS, pid=76}] handler.RSProcedureHandler(58): pid=76 java.io.IOException: Unable to complete flush {ENCODED => 9edd584a765d2a226d81ae3095fa4916, NAME => 'TestAcidGuarantees,,1734371848597.9edd584a765d2a226d81ae3095fa4916.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-16T17:57:46,094 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-2 {event_type=RS_FLUSH_REGIONS, pid=76}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=76 java.io.IOException: Unable to complete flush {ENCODED => 9edd584a765d2a226d81ae3095fa4916, NAME => 'TestAcidGuarantees,,1734371848597.9edd584a765d2a226d81ae3095fa4916.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-16T17:57:46,095 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38367 {}] master.HMaster(4114): Remote procedure failed, pid=76 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 9edd584a765d2a226d81ae3095fa4916, NAME => 'TestAcidGuarantees,,1734371848597.9edd584a765d2a226d81ae3095fa4916.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 9edd584a765d2a226d81ae3095fa4916, NAME => 'TestAcidGuarantees,,1734371848597.9edd584a765d2a226d81ae3095fa4916.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-16T17:57:46,228 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=58.14 KB at sequenceid=353 (bloomFilter=true), to=hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/9edd584a765d2a226d81ae3095fa4916/.tmp/C/26fbdc55e7f7400c90eda639d12f6796 2024-12-16T17:57:46,233 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/9edd584a765d2a226d81ae3095fa4916/.tmp/A/d987114fae5148eba8c369b0d9e31847 as hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/9edd584a765d2a226d81ae3095fa4916/A/d987114fae5148eba8c369b0d9e31847 2024-12-16T17:57:46,238 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/9edd584a765d2a226d81ae3095fa4916/A/d987114fae5148eba8c369b0d9e31847, entries=150, sequenceid=353, filesize=12.0 K 2024-12-16T17:57:46,240 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/9edd584a765d2a226d81ae3095fa4916/.tmp/B/482cf643da9b4f0eaf5cd44fb2bf9aa7 as hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/9edd584a765d2a226d81ae3095fa4916/B/482cf643da9b4f0eaf5cd44fb2bf9aa7 2024-12-16T17:57:46,246 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/9edd584a765d2a226d81ae3095fa4916/B/482cf643da9b4f0eaf5cd44fb2bf9aa7, entries=150, sequenceid=353, filesize=12.0 K 2024-12-16T17:57:46,247 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 3609ad07831c,39733,1734371789085 2024-12-16T17:57:46,248 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/9edd584a765d2a226d81ae3095fa4916/.tmp/C/26fbdc55e7f7400c90eda639d12f6796 as hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/9edd584a765d2a226d81ae3095fa4916/C/26fbdc55e7f7400c90eda639d12f6796 2024-12-16T17:57:46,253 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/9edd584a765d2a226d81ae3095fa4916/C/26fbdc55e7f7400c90eda639d12f6796, entries=150, sequenceid=353, filesize=12.0 K 2024-12-16T17:57:46,254 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~174.43 KB/178620, heapSize ~457.73 KB/468720, currentSize=26.84 KB/27480 for 9edd584a765d2a226d81ae3095fa4916 in 887ms, sequenceid=353, compaction requested=true 2024-12-16T17:57:46,254 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 
9edd584a765d2a226d81ae3095fa4916: 2024-12-16T17:57:46,254 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=39733 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=76 2024-12-16T17:57:46,255 DEBUG [RS:0;3609ad07831c:39733-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-16T17:57:46,255 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-0 {event_type=RS_FLUSH_REGIONS, pid=76}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1734371848597.9edd584a765d2a226d81ae3095fa4916. 2024-12-16T17:57:46,255 INFO [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-0 {event_type=RS_FLUSH_REGIONS, pid=76}] regionserver.HRegion(2837): Flushing 9edd584a765d2a226d81ae3095fa4916 3/3 column families, dataSize=26.84 KB heapSize=71.06 KB 2024-12-16T17:57:46,255 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 9edd584a765d2a226d81ae3095fa4916:A, priority=-2147483648, current under compaction store size is 1 2024-12-16T17:57:46,255 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-16T17:57:46,255 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 9edd584a765d2a226d81ae3095fa4916:B, priority=-2147483648, current under compaction store size is 2 2024-12-16T17:57:46,255 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-16T17:57:46,255 DEBUG [RS:0;3609ad07831c:39733-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-16T17:57:46,255 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 9edd584a765d2a226d81ae3095fa4916:C, priority=-2147483648, current under compaction store size is 3 2024-12-16T17:57:46,255 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-0 {event_type=RS_FLUSH_REGIONS, pid=76}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 9edd584a765d2a226d81ae3095fa4916, store=A 2024-12-16T17:57:46,255 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-16T17:57:46,255 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-0 {event_type=RS_FLUSH_REGIONS, pid=76}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-16T17:57:46,255 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-0 {event_type=RS_FLUSH_REGIONS, pid=76}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 9edd584a765d2a226d81ae3095fa4916, store=B 2024-12-16T17:57:46,255 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-0 {event_type=RS_FLUSH_REGIONS, pid=76}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-16T17:57:46,255 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-0 {event_type=RS_FLUSH_REGIONS, pid=76}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 9edd584a765d2a226d81ae3095fa4916, store=C 2024-12-16T17:57:46,255 DEBUG 
[RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-0 {event_type=RS_FLUSH_REGIONS, pid=76}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-16T17:57:46,258 DEBUG [RS:0;3609ad07831c:39733-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 37619 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-16T17:57:46,258 DEBUG [RS:0;3609ad07831c:39733-shortCompactions-0 {}] regionserver.HStore(1540): 9edd584a765d2a226d81ae3095fa4916/B is initiating minor compaction (all files) 2024-12-16T17:57:46,258 INFO [RS:0;3609ad07831c:39733-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 9edd584a765d2a226d81ae3095fa4916/B in TestAcidGuarantees,,1734371848597.9edd584a765d2a226d81ae3095fa4916. 2024-12-16T17:57:46,258 INFO [RS:0;3609ad07831c:39733-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/9edd584a765d2a226d81ae3095fa4916/B/7df85e88d0f7442284e56ab807a250d2, hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/9edd584a765d2a226d81ae3095fa4916/B/844b3937a1c543009d17f7f1f7369509, hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/9edd584a765d2a226d81ae3095fa4916/B/482cf643da9b4f0eaf5cd44fb2bf9aa7] into tmpdir=hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/9edd584a765d2a226d81ae3095fa4916/.tmp, totalSize=36.7 K 2024-12-16T17:57:46,258 DEBUG [RS:0;3609ad07831c:39733-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 37619 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-16T17:57:46,258 DEBUG [RS:0;3609ad07831c:39733-longCompactions-0 {}] regionserver.HStore(1540): 9edd584a765d2a226d81ae3095fa4916/A is initiating minor compaction (all files) 2024-12-16T17:57:46,258 INFO [RS:0;3609ad07831c:39733-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 9edd584a765d2a226d81ae3095fa4916/A in TestAcidGuarantees,,1734371848597.9edd584a765d2a226d81ae3095fa4916. 
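
Note: the entries above show ExploringCompactionPolicy selecting all three store files of families B and A for a minor compaction. For reference, the same kind of compaction can also be requested explicitly through the public Admin client; the sketch below is illustrative only (the class name, connection setup, and the choice of family "B" are assumptions, not part of this test run):

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;
    import org.apache.hadoop.hbase.util.Bytes;

    public class RequestMinorCompaction {
        public static void main(String[] args) throws Exception {
            Configuration conf = HBaseConfiguration.create();   // reads hbase-site.xml from the classpath
            try (Connection conn = ConnectionFactory.createConnection(conf);
                 Admin admin = conn.getAdmin()) {
                // Queue a compaction of family "B" of the table from this log;
                // the call returns once the request is queued, it does not wait
                // for the CompactSplit threads to finish.
                admin.compact(TableName.valueOf("TestAcidGuarantees"), Bytes.toBytes("B"));
            }
        }
    }
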
2024-12-16T17:57:46,259 INFO [RS:0;3609ad07831c:39733-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/9edd584a765d2a226d81ae3095fa4916/A/3b69673bc32244ac9bbc02d3e38837f9, hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/9edd584a765d2a226d81ae3095fa4916/A/7b27faf4cdd640a6a30ef945d439bfe1, hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/9edd584a765d2a226d81ae3095fa4916/A/d987114fae5148eba8c369b0d9e31847] into tmpdir=hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/9edd584a765d2a226d81ae3095fa4916/.tmp, totalSize=36.7 K 2024-12-16T17:57:46,259 DEBUG [RS:0;3609ad07831c:39733-shortCompactions-0 {}] compactions.Compactor(224): Compacting 7df85e88d0f7442284e56ab807a250d2, keycount=150, bloomtype=ROW, size=12.7 K, encoding=NONE, compression=NONE, seqNum=312, earliestPutTs=1734371862453 2024-12-16T17:57:46,260 DEBUG [RS:0;3609ad07831c:39733-shortCompactions-0 {}] compactions.Compactor(224): Compacting 844b3937a1c543009d17f7f1f7369509, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=323, earliestPutTs=1734371863577 2024-12-16T17:57:46,260 DEBUG [RS:0;3609ad07831c:39733-longCompactions-0 {}] compactions.Compactor(224): Compacting 3b69673bc32244ac9bbc02d3e38837f9, keycount=150, bloomtype=ROW, size=12.7 K, encoding=NONE, compression=NONE, seqNum=312, earliestPutTs=1734371862453 2024-12-16T17:57:46,260 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-0 {event_type=RS_FLUSH_REGIONS, pid=76}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/9edd584a765d2a226d81ae3095fa4916/.tmp/A/d00f32e57edb4ad88375f6de7b405f7b is 50, key is test_row_0/A:col10/1734371865374/Put/seqid=0 2024-12-16T17:57:46,260 DEBUG [RS:0;3609ad07831c:39733-shortCompactions-0 {}] compactions.Compactor(224): Compacting 482cf643da9b4f0eaf5cd44fb2bf9aa7, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=353, earliestPutTs=1734371864199 2024-12-16T17:57:46,265 DEBUG [RS:0;3609ad07831c:39733-longCompactions-0 {}] compactions.Compactor(224): Compacting 7b27faf4cdd640a6a30ef945d439bfe1, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=323, earliestPutTs=1734371863577 2024-12-16T17:57:46,265 DEBUG [RS:0;3609ad07831c:39733-longCompactions-0 {}] compactions.Compactor(224): Compacting d987114fae5148eba8c369b0d9e31847, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=353, earliestPutTs=1734371864199 2024-12-16T17:57:46,273 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41817 is added to blk_1073742150_1326 (size=9857) 2024-12-16T17:57:46,276 INFO [RS:0;3609ad07831c:39733-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 9edd584a765d2a226d81ae3095fa4916#B#compaction#280 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-12-16T17:57:46,276 DEBUG [RS:0;3609ad07831c:39733-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/9edd584a765d2a226d81ae3095fa4916/.tmp/B/c511b37367cf4bbe982f3927cd73511e is 50, key is test_row_0/B:col10/1734371864199/Put/seqid=0 2024-12-16T17:57:46,279 INFO [RS:0;3609ad07831c:39733-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 9edd584a765d2a226d81ae3095fa4916#A#compaction#281 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-12-16T17:57:46,280 DEBUG [RS:0;3609ad07831c:39733-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/9edd584a765d2a226d81ae3095fa4916/.tmp/A/9a2426b80f9446ddac577f0165208c42 is 50, key is test_row_0/A:col10/1734371864199/Put/seqid=0 2024-12-16T17:57:46,284 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41817 is added to blk_1073742151_1327 (size=13119) 2024-12-16T17:57:46,286 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41817 is added to blk_1073742152_1328 (size=13119) 2024-12-16T17:57:46,291 DEBUG [RS:0;3609ad07831c:39733-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/9edd584a765d2a226d81ae3095fa4916/.tmp/B/c511b37367cf4bbe982f3927cd73511e as hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/9edd584a765d2a226d81ae3095fa4916/B/c511b37367cf4bbe982f3927cd73511e 2024-12-16T17:57:46,293 DEBUG [RS:0;3609ad07831c:39733-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/9edd584a765d2a226d81ae3095fa4916/.tmp/A/9a2426b80f9446ddac577f0165208c42 as hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/9edd584a765d2a226d81ae3095fa4916/A/9a2426b80f9446ddac577f0165208c42 2024-12-16T17:57:46,298 INFO [RS:0;3609ad07831c:39733-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 9edd584a765d2a226d81ae3095fa4916/B of 9edd584a765d2a226d81ae3095fa4916 into c511b37367cf4bbe982f3927cd73511e(size=12.8 K), total size for store is 12.8 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-12-16T17:57:46,298 DEBUG [RS:0;3609ad07831c:39733-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 9edd584a765d2a226d81ae3095fa4916: 2024-12-16T17:57:46,299 INFO [RS:0;3609ad07831c:39733-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1734371848597.9edd584a765d2a226d81ae3095fa4916., storeName=9edd584a765d2a226d81ae3095fa4916/B, priority=13, startTime=1734371866255; duration=0sec 2024-12-16T17:57:46,299 DEBUG [RS:0;3609ad07831c:39733-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-16T17:57:46,299 DEBUG [RS:0;3609ad07831c:39733-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 9edd584a765d2a226d81ae3095fa4916:B 2024-12-16T17:57:46,299 DEBUG [RS:0;3609ad07831c:39733-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-16T17:57:46,300 DEBUG [RS:0;3609ad07831c:39733-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 37619 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-16T17:57:46,300 DEBUG [RS:0;3609ad07831c:39733-shortCompactions-0 {}] regionserver.HStore(1540): 9edd584a765d2a226d81ae3095fa4916/C is initiating minor compaction (all files) 2024-12-16T17:57:46,300 INFO [RS:0;3609ad07831c:39733-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 9edd584a765d2a226d81ae3095fa4916/C in TestAcidGuarantees,,1734371848597.9edd584a765d2a226d81ae3095fa4916. 2024-12-16T17:57:46,301 INFO [RS:0;3609ad07831c:39733-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/9edd584a765d2a226d81ae3095fa4916/C/35bfb5128a414adc9736480f8dd7b762, hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/9edd584a765d2a226d81ae3095fa4916/C/739d7f17b6ec46b0bd0022544ba0a117, hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/9edd584a765d2a226d81ae3095fa4916/C/26fbdc55e7f7400c90eda639d12f6796] into tmpdir=hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/9edd584a765d2a226d81ae3095fa4916/.tmp, totalSize=36.7 K 2024-12-16T17:57:46,302 DEBUG [RS:0;3609ad07831c:39733-shortCompactions-0 {}] compactions.Compactor(224): Compacting 35bfb5128a414adc9736480f8dd7b762, keycount=150, bloomtype=ROW, size=12.7 K, encoding=NONE, compression=NONE, seqNum=312, earliestPutTs=1734371862453 2024-12-16T17:57:46,303 INFO [RS:0;3609ad07831c:39733-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 9edd584a765d2a226d81ae3095fa4916/A of 9edd584a765d2a226d81ae3095fa4916 into 9a2426b80f9446ddac577f0165208c42(size=12.8 K), total size for store is 12.8 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-12-16T17:57:46,303 DEBUG [RS:0;3609ad07831c:39733-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 9edd584a765d2a226d81ae3095fa4916: 2024-12-16T17:57:46,303 INFO [RS:0;3609ad07831c:39733-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1734371848597.9edd584a765d2a226d81ae3095fa4916., storeName=9edd584a765d2a226d81ae3095fa4916/A, priority=13, startTime=1734371866254; duration=0sec 2024-12-16T17:57:46,303 DEBUG [RS:0;3609ad07831c:39733-shortCompactions-0 {}] compactions.Compactor(224): Compacting 739d7f17b6ec46b0bd0022544ba0a117, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=323, earliestPutTs=1734371863577 2024-12-16T17:57:46,303 DEBUG [RS:0;3609ad07831c:39733-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-16T17:57:46,303 DEBUG [RS:0;3609ad07831c:39733-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 9edd584a765d2a226d81ae3095fa4916:A 2024-12-16T17:57:46,304 DEBUG [RS:0;3609ad07831c:39733-shortCompactions-0 {}] compactions.Compactor(224): Compacting 26fbdc55e7f7400c90eda639d12f6796, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=353, earliestPutTs=1734371864199 2024-12-16T17:57:46,314 INFO [RS:0;3609ad07831c:39733-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 9edd584a765d2a226d81ae3095fa4916#C#compaction#282 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-12-16T17:57:46,314 DEBUG [RS:0;3609ad07831c:39733-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/9edd584a765d2a226d81ae3095fa4916/.tmp/C/9d0acdf8e57f416c8a8a682de34b59a9 is 50, key is test_row_0/C:col10/1734371864199/Put/seqid=0 2024-12-16T17:57:46,353 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41817 is added to blk_1073742153_1329 (size=13119) 2024-12-16T17:57:46,360 DEBUG [RS:0;3609ad07831c:39733-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/9edd584a765d2a226d81ae3095fa4916/.tmp/C/9d0acdf8e57f416c8a8a682de34b59a9 as hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/9edd584a765d2a226d81ae3095fa4916/C/9d0acdf8e57f416c8a8a682de34b59a9 2024-12-16T17:57:46,366 INFO [RS:0;3609ad07831c:39733-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 9edd584a765d2a226d81ae3095fa4916/C of 9edd584a765d2a226d81ae3095fa4916 into 9d0acdf8e57f416c8a8a682de34b59a9(size=12.8 K), total size for store is 12.8 K. This selection was in queue for 0sec, and took 0sec to execute. 
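
Note: each of the three families ends up with a single ~12.8 K file because three eligible store files is the usual minimum for a minor-compaction selection. A minimal sketch of reading the two selection knobs involved, assuming the stock property names and defaults from hbase-default.xml (the class is illustrative):

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;

    public class PrintCompactionSelectionKnobs {
        public static void main(String[] args) {
            Configuration conf = HBaseConfiguration.create();
            // Minimum and maximum number of eligible store files for one minor
            // compaction; with the usual minimum of 3, the three flushed files per
            // family above are exactly enough to trigger the selections logged.
            int min = conf.getInt("hbase.hstore.compaction.min", 3);
            int max = conf.getInt("hbase.hstore.compaction.max", 10);
            System.out.println("hbase.hstore.compaction.min=" + min
                + ", hbase.hstore.compaction.max=" + max);
        }
    }
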
2024-12-16T17:57:46,366 DEBUG [RS:0;3609ad07831c:39733-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 9edd584a765d2a226d81ae3095fa4916: 2024-12-16T17:57:46,366 INFO [RS:0;3609ad07831c:39733-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1734371848597.9edd584a765d2a226d81ae3095fa4916., storeName=9edd584a765d2a226d81ae3095fa4916/C, priority=13, startTime=1734371866255; duration=0sec 2024-12-16T17:57:46,366 DEBUG [RS:0;3609ad07831c:39733-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-16T17:57:46,366 DEBUG [RS:0;3609ad07831c:39733-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 9edd584a765d2a226d81ae3095fa4916:C 2024-12-16T17:57:46,389 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38367 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=75 2024-12-16T17:57:46,397 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39733 {}] regionserver.HRegion(8581): Flush requested on 9edd584a765d2a226d81ae3095fa4916 2024-12-16T17:57:46,397 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1734371848597.9edd584a765d2a226d81ae3095fa4916. as already flushing 2024-12-16T17:57:46,480 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39733 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9edd584a765d2a226d81ae3095fa4916, server=3609ad07831c,39733,1734371789085 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-16T17:57:46,481 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39733 {}] ipc.CallRunner(138): callId: 186 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45454 deadline: 1734371926478, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9edd584a765d2a226d81ae3095fa4916, server=3609ad07831c,39733,1734371789085 2024-12-16T17:57:46,492 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39733 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9edd584a765d2a226d81ae3095fa4916, server=3609ad07831c,39733,1734371789085 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-16T17:57:46,492 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39733 {}] ipc.CallRunner(138): callId: 155 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45486 deadline: 1734371926491, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9edd584a765d2a226d81ae3095fa4916, server=3609ad07831c,39733,1734371789085 2024-12-16T17:57:46,494 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39733 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9edd584a765d2a226d81ae3095fa4916, server=3609ad07831c,39733,1734371789085 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-16T17:57:46,494 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39733 {}] ipc.CallRunner(138): callId: 154 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45484 deadline: 1734371926493, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9edd584a765d2a226d81ae3095fa4916, server=3609ad07831c,39733,1734371789085 2024-12-16T17:57:46,583 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39733 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9edd584a765d2a226d81ae3095fa4916, server=3609ad07831c,39733,1734371789085 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-16T17:57:46,584 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39733 {}] ipc.CallRunner(138): callId: 188 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45454 deadline: 1734371926582, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9edd584a765d2a226d81ae3095fa4916, server=3609ad07831c,39733,1734371789085 2024-12-16T17:57:46,674 INFO [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-0 {event_type=RS_FLUSH_REGIONS, pid=76}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=8.95 KB at sequenceid=360 (bloomFilter=true), to=hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/9edd584a765d2a226d81ae3095fa4916/.tmp/A/d00f32e57edb4ad88375f6de7b405f7b 2024-12-16T17:57:46,696 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-0 {event_type=RS_FLUSH_REGIONS, pid=76}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/9edd584a765d2a226d81ae3095fa4916/.tmp/B/598dba38cc67414bb2bc2a0f6de7b333 is 50, key is test_row_0/B:col10/1734371865374/Put/seqid=0 2024-12-16T17:57:46,744 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41817 is added to blk_1073742154_1330 (size=9857) 2024-12-16T17:57:46,745 INFO [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-0 {event_type=RS_FLUSH_REGIONS, pid=76}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=8.95 KB at sequenceid=360 (bloomFilter=true), to=hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/9edd584a765d2a226d81ae3095fa4916/.tmp/B/598dba38cc67414bb2bc2a0f6de7b333 2024-12-16T17:57:46,770 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-0 {event_type=RS_FLUSH_REGIONS, pid=76}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/9edd584a765d2a226d81ae3095fa4916/.tmp/C/360ffb32fb814e7a826ea82632384f7c is 50, key is test_row_0/C:col10/1734371865374/Put/seqid=0 2024-12-16T17:57:46,786 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39733 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9edd584a765d2a226d81ae3095fa4916, server=3609ad07831c,39733,1734371789085 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-16T17:57:46,786 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39733 {}] ipc.CallRunner(138): callId: 190 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45454 deadline: 1734371926785, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9edd584a765d2a226d81ae3095fa4916, server=3609ad07831c,39733,1734371789085 2024-12-16T17:57:46,799 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41817 is added to blk_1073742155_1331 (size=9857) 2024-12-16T17:57:46,799 INFO [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-0 {event_type=RS_FLUSH_REGIONS, pid=76}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=8.95 KB at sequenceid=360 (bloomFilter=true), to=hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/9edd584a765d2a226d81ae3095fa4916/.tmp/C/360ffb32fb814e7a826ea82632384f7c 2024-12-16T17:57:46,807 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-0 {event_type=RS_FLUSH_REGIONS, pid=76}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/9edd584a765d2a226d81ae3095fa4916/.tmp/A/d00f32e57edb4ad88375f6de7b405f7b as hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/9edd584a765d2a226d81ae3095fa4916/A/d00f32e57edb4ad88375f6de7b405f7b 2024-12-16T17:57:46,811 INFO [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-0 {event_type=RS_FLUSH_REGIONS, pid=76}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/9edd584a765d2a226d81ae3095fa4916/A/d00f32e57edb4ad88375f6de7b405f7b, entries=100, sequenceid=360, filesize=9.6 K 2024-12-16T17:57:46,812 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-0 {event_type=RS_FLUSH_REGIONS, pid=76}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/9edd584a765d2a226d81ae3095fa4916/.tmp/B/598dba38cc67414bb2bc2a0f6de7b333 as hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/9edd584a765d2a226d81ae3095fa4916/B/598dba38cc67414bb2bc2a0f6de7b333 2024-12-16T17:57:46,818 INFO [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-0 {event_type=RS_FLUSH_REGIONS, 
pid=76}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/9edd584a765d2a226d81ae3095fa4916/B/598dba38cc67414bb2bc2a0f6de7b333, entries=100, sequenceid=360, filesize=9.6 K 2024-12-16T17:57:46,819 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-0 {event_type=RS_FLUSH_REGIONS, pid=76}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/9edd584a765d2a226d81ae3095fa4916/.tmp/C/360ffb32fb814e7a826ea82632384f7c as hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/9edd584a765d2a226d81ae3095fa4916/C/360ffb32fb814e7a826ea82632384f7c 2024-12-16T17:57:46,825 INFO [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-0 {event_type=RS_FLUSH_REGIONS, pid=76}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/9edd584a765d2a226d81ae3095fa4916/C/360ffb32fb814e7a826ea82632384f7c, entries=100, sequenceid=360, filesize=9.6 K 2024-12-16T17:57:46,826 INFO [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-0 {event_type=RS_FLUSH_REGIONS, pid=76}] regionserver.HRegion(3040): Finished flush of dataSize ~26.84 KB/27480, heapSize ~71.02 KB/72720, currentSize=174.43 KB/178620 for 9edd584a765d2a226d81ae3095fa4916 in 571ms, sequenceid=360, compaction requested=false 2024-12-16T17:57:46,826 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-0 {event_type=RS_FLUSH_REGIONS, pid=76}] regionserver.HRegion(2538): Flush status journal for 9edd584a765d2a226d81ae3095fa4916: 2024-12-16T17:57:46,827 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-0 {event_type=RS_FLUSH_REGIONS, pid=76}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1734371848597.9edd584a765d2a226d81ae3095fa4916. 
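
Note: pid=76 finishes here as the region-server side of a master FlushTableProcedure. From a client or test, the equivalent request is a single Admin.flush call; the sketch below is illustrative (class name and connection setup are assumptions):

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;

    public class FlushTestTable {
        public static void main(String[] args) throws Exception {
            Configuration conf = HBaseConfiguration.create();
            try (Connection conn = ConnectionFactory.createConnection(conf);
                 Admin admin = conn.getAdmin()) {
                // Blocks until the master-side FlushTableProcedure and its
                // per-region FlushRegionProcedure subprocedures are done,
                // matching the "Operation: FLUSH ... completed" lines nearby.
                admin.flush(TableName.valueOf("TestAcidGuarantees"));
            }
        }
    }
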
2024-12-16T17:57:46,827 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-0 {event_type=RS_FLUSH_REGIONS, pid=76}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=76 2024-12-16T17:57:46,827 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38367 {}] master.HMaster(4106): Remote procedure done, pid=76 2024-12-16T17:57:46,829 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=76, resume processing ppid=75 2024-12-16T17:57:46,829 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=76, ppid=75, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 1.0410 sec 2024-12-16T17:57:46,831 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=75, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=75, table=TestAcidGuarantees in 1.0460 sec 2024-12-16T17:57:46,890 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38367 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=75 2024-12-16T17:57:46,890 INFO [Thread-1171 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 75 completed 2024-12-16T17:57:46,892 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38367 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-12-16T17:57:46,893 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38367 {}] procedure2.ProcedureExecutor(1098): Stored pid=77, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=77, table=TestAcidGuarantees 2024-12-16T17:57:46,893 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38367 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=77 2024-12-16T17:57:46,894 INFO [PEWorker-4 {}] procedure.FlushTableProcedure(91): pid=77, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=77, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-12-16T17:57:46,894 INFO [PEWorker-4 {}] procedure.FlushTableProcedure(91): pid=77, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=77, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-12-16T17:57:46,894 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=78, ppid=77, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-12-16T17:57:46,995 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38367 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=77 2024-12-16T17:57:47,045 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 3609ad07831c,39733,1734371789085 2024-12-16T17:57:47,046 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=39733 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=78 2024-12-16T17:57:47,046 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-1 {event_type=RS_FLUSH_REGIONS, pid=78}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1734371848597.9edd584a765d2a226d81ae3095fa4916. 
2024-12-16T17:57:47,046 INFO [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-1 {event_type=RS_FLUSH_REGIONS, pid=78}] regionserver.HRegion(2837): Flushing 9edd584a765d2a226d81ae3095fa4916 3/3 column families, dataSize=174.43 KB heapSize=457.78 KB 2024-12-16T17:57:47,046 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-1 {event_type=RS_FLUSH_REGIONS, pid=78}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 9edd584a765d2a226d81ae3095fa4916, store=A 2024-12-16T17:57:47,047 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-1 {event_type=RS_FLUSH_REGIONS, pid=78}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-16T17:57:47,047 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-1 {event_type=RS_FLUSH_REGIONS, pid=78}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 9edd584a765d2a226d81ae3095fa4916, store=B 2024-12-16T17:57:47,047 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-1 {event_type=RS_FLUSH_REGIONS, pid=78}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-16T17:57:47,047 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-1 {event_type=RS_FLUSH_REGIONS, pid=78}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 9edd584a765d2a226d81ae3095fa4916, store=C 2024-12-16T17:57:47,047 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-1 {event_type=RS_FLUSH_REGIONS, pid=78}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-16T17:57:47,054 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-1 {event_type=RS_FLUSH_REGIONS, pid=78}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/9edd584a765d2a226d81ae3095fa4916/.tmp/A/c30b1fbfbdd54b768c0879ce8f661472 is 50, key is test_row_0/A:col10/1734371866459/Put/seqid=0 2024-12-16T17:57:47,082 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41817 is added to blk_1073742156_1332 (size=12301) 2024-12-16T17:57:47,083 INFO [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-1 {event_type=RS_FLUSH_REGIONS, pid=78}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=58.14 KB at sequenceid=392 (bloomFilter=true), to=hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/9edd584a765d2a226d81ae3095fa4916/.tmp/A/c30b1fbfbdd54b768c0879ce8f661472 2024-12-16T17:57:47,091 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39733 {}] regionserver.HRegion(8581): Flush requested on 9edd584a765d2a226d81ae3095fa4916 2024-12-16T17:57:47,091 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1734371848597.9edd584a765d2a226d81ae3095fa4916. as already flushing 2024-12-16T17:57:47,096 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-1 {event_type=RS_FLUSH_REGIONS, pid=78}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/9edd584a765d2a226d81ae3095fa4916/.tmp/B/320b6bc898a54e5996b38d884cf046e6 is 50, key is test_row_0/B:col10/1734371866459/Put/seqid=0 2024-12-16T17:57:47,101 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39733 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9edd584a765d2a226d81ae3095fa4916, server=3609ad07831c,39733,1734371789085 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-16T17:57:47,101 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39733 {}] ipc.CallRunner(138): callId: 196 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45454 deadline: 1734371927100, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9edd584a765d2a226d81ae3095fa4916, server=3609ad07831c,39733,1734371789085 2024-12-16T17:57:47,133 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41817 is added to blk_1073742157_1333 (size=12301) 2024-12-16T17:57:47,134 INFO [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-1 {event_type=RS_FLUSH_REGIONS, pid=78}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=58.14 KB at sequenceid=392 (bloomFilter=true), to=hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/9edd584a765d2a226d81ae3095fa4916/.tmp/B/320b6bc898a54e5996b38d884cf046e6 2024-12-16T17:57:47,162 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-1 {event_type=RS_FLUSH_REGIONS, pid=78}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/9edd584a765d2a226d81ae3095fa4916/.tmp/C/b4481327c4464533ae1db1bef32d992b is 50, key is test_row_0/C:col10/1734371866459/Put/seqid=0 2024-12-16T17:57:47,195 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38367 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=77 2024-12-16T17:57:47,197 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41817 is added to blk_1073742158_1334 (size=12301) 2024-12-16T17:57:47,198 INFO [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-1 {event_type=RS_FLUSH_REGIONS, pid=78}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=58.14 KB at sequenceid=392 (bloomFilter=true), to=hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/9edd584a765d2a226d81ae3095fa4916/.tmp/C/b4481327c4464533ae1db1bef32d992b 2024-12-16T17:57:47,208 WARN 
[RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39733 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9edd584a765d2a226d81ae3095fa4916, server=3609ad07831c,39733,1734371789085 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-16T17:57:47,208 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39733 {}] ipc.CallRunner(138): callId: 198 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45454 deadline: 1734371927207, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9edd584a765d2a226d81ae3095fa4916, server=3609ad07831c,39733,1734371789085 2024-12-16T17:57:47,209 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-1 {event_type=RS_FLUSH_REGIONS, pid=78}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/9edd584a765d2a226d81ae3095fa4916/.tmp/A/c30b1fbfbdd54b768c0879ce8f661472 as hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/9edd584a765d2a226d81ae3095fa4916/A/c30b1fbfbdd54b768c0879ce8f661472 2024-12-16T17:57:47,218 INFO [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-1 {event_type=RS_FLUSH_REGIONS, pid=78}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/9edd584a765d2a226d81ae3095fa4916/A/c30b1fbfbdd54b768c0879ce8f661472, entries=150, sequenceid=392, filesize=12.0 K 2024-12-16T17:57:47,220 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-1 {event_type=RS_FLUSH_REGIONS, pid=78}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/9edd584a765d2a226d81ae3095fa4916/.tmp/B/320b6bc898a54e5996b38d884cf046e6 as hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/9edd584a765d2a226d81ae3095fa4916/B/320b6bc898a54e5996b38d884cf046e6 2024-12-16T17:57:47,226 INFO [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-1 {event_type=RS_FLUSH_REGIONS, pid=78}] regionserver.HStore$StoreFlusherImpl(1989): Added 
hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/9edd584a765d2a226d81ae3095fa4916/B/320b6bc898a54e5996b38d884cf046e6, entries=150, sequenceid=392, filesize=12.0 K 2024-12-16T17:57:47,227 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-1 {event_type=RS_FLUSH_REGIONS, pid=78}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/9edd584a765d2a226d81ae3095fa4916/.tmp/C/b4481327c4464533ae1db1bef32d992b as hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/9edd584a765d2a226d81ae3095fa4916/C/b4481327c4464533ae1db1bef32d992b 2024-12-16T17:57:47,232 INFO [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-1 {event_type=RS_FLUSH_REGIONS, pid=78}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/9edd584a765d2a226d81ae3095fa4916/C/b4481327c4464533ae1db1bef32d992b, entries=150, sequenceid=392, filesize=12.0 K 2024-12-16T17:57:47,232 INFO [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-1 {event_type=RS_FLUSH_REGIONS, pid=78}] regionserver.HRegion(3040): Finished flush of dataSize ~174.43 KB/178620, heapSize ~457.73 KB/468720, currentSize=26.84 KB/27480 for 9edd584a765d2a226d81ae3095fa4916 in 186ms, sequenceid=392, compaction requested=true 2024-12-16T17:57:47,233 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-1 {event_type=RS_FLUSH_REGIONS, pid=78}] regionserver.HRegion(2538): Flush status journal for 9edd584a765d2a226d81ae3095fa4916: 2024-12-16T17:57:47,233 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-1 {event_type=RS_FLUSH_REGIONS, pid=78}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1734371848597.9edd584a765d2a226d81ae3095fa4916. 
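
Note: the repeated RegionTooBusyException warnings mean writes are being rejected while the memstore sits above its blocking limit and the flush catches up. A minimal sketch of how a caller might back off and retry such a rejected put, assuming the exception actually reaches the caller rather than being absorbed by the client's own retry logic (table, row, and family names are taken from this log; the loop bounds and sleep are arbitrary):

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.RegionTooBusyException;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;
    import org.apache.hadoop.hbase.client.Put;
    import org.apache.hadoop.hbase.client.Table;
    import org.apache.hadoop.hbase.util.Bytes;

    public class PutWithSimpleBackoff {
        public static void main(String[] args) throws Exception {
            Configuration conf = HBaseConfiguration.create();
            try (Connection conn = ConnectionFactory.createConnection(conf);
                 Table table = conn.getTable(TableName.valueOf("TestAcidGuarantees"))) {
                Put put = new Put(Bytes.toBytes("test_row_0"));
                put.addColumn(Bytes.toBytes("A"), Bytes.toBytes("col10"), Bytes.toBytes("value"));
                for (int attempt = 1; attempt <= 5; attempt++) {
                    try {
                        table.put(put);
                        break;                          // write accepted
                    } catch (RegionTooBusyException e) {
                        // Region is above its blocking memstore limit; give the
                        // in-progress flush time to drain it, then try again.
                        Thread.sleep(200L * attempt);
                    }
                }
            }
        }
    }
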
2024-12-16T17:57:47,233 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-1 {event_type=RS_FLUSH_REGIONS, pid=78}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=78 2024-12-16T17:57:47,233 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38367 {}] master.HMaster(4106): Remote procedure done, pid=78 2024-12-16T17:57:47,235 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=78, resume processing ppid=77 2024-12-16T17:57:47,235 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=78, ppid=77, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 340 msec 2024-12-16T17:57:47,236 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=77, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=77, table=TestAcidGuarantees in 343 msec 2024-12-16T17:57:47,418 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39733 {}] regionserver.HRegion(8581): Flush requested on 9edd584a765d2a226d81ae3095fa4916 2024-12-16T17:57:47,418 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 9edd584a765d2a226d81ae3095fa4916 3/3 column families, dataSize=53.67 KB heapSize=141.38 KB 2024-12-16T17:57:47,419 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 9edd584a765d2a226d81ae3095fa4916, store=A 2024-12-16T17:57:47,419 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-16T17:57:47,419 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 9edd584a765d2a226d81ae3095fa4916, store=B 2024-12-16T17:57:47,419 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-16T17:57:47,419 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 9edd584a765d2a226d81ae3095fa4916, store=C 2024-12-16T17:57:47,419 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-16T17:57:47,441 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/9edd584a765d2a226d81ae3095fa4916/.tmp/A/f609b536080347aa8c03be0b56810506 is 50, key is test_row_0/A:col10/1734371867410/Put/seqid=0 2024-12-16T17:57:47,445 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41817 is added to blk_1073742159_1335 (size=12301) 2024-12-16T17:57:47,484 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39733 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9edd584a765d2a226d81ae3095fa4916, server=3609ad07831c,39733,1734371789085 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-16T17:57:47,484 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39733 {}] ipc.CallRunner(138): callId: 226 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45454 deadline: 1734371927483, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9edd584a765d2a226d81ae3095fa4916, server=3609ad07831c,39733,1734371789085 2024-12-16T17:57:47,495 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39733 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9edd584a765d2a226d81ae3095fa4916, server=3609ad07831c,39733,1734371789085 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-16T17:57:47,495 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39733 {}] ipc.CallRunner(138): callId: 157 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45486 deadline: 1734371927494, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9edd584a765d2a226d81ae3095fa4916, server=3609ad07831c,39733,1734371789085 2024-12-16T17:57:47,497 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38367 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=77 2024-12-16T17:57:47,497 INFO [Thread-1171 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 77 completed 2024-12-16T17:57:47,498 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38367 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-12-16T17:57:47,499 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38367 {}] procedure2.ProcedureExecutor(1098): Stored pid=79, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=79, table=TestAcidGuarantees 2024-12-16T17:57:47,499 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38367 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=79 2024-12-16T17:57:47,500 INFO [PEWorker-1 {}] procedure.FlushTableProcedure(91): pid=79, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=79, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-12-16T17:57:47,500 INFO [PEWorker-1 {}] procedure.FlushTableProcedure(91): pid=79, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=79, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-12-16T17:57:47,500 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=80, ppid=79, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-12-16T17:57:47,503 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39733 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9edd584a765d2a226d81ae3095fa4916, server=3609ad07831c,39733,1734371789085 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-16T17:57:47,504 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39733 {}] ipc.CallRunner(138): callId: 156 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45484 deadline: 1734371927503, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9edd584a765d2a226d81ae3095fa4916, server=3609ad07831c,39733,1734371789085 2024-12-16T17:57:47,586 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39733 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9edd584a765d2a226d81ae3095fa4916, server=3609ad07831c,39733,1734371789085 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-16T17:57:47,586 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39733 {}] ipc.CallRunner(138): callId: 228 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45454 deadline: 1734371927585, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9edd584a765d2a226d81ae3095fa4916, server=3609ad07831c,39733,1734371789085 2024-12-16T17:57:47,600 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38367 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=79 2024-12-16T17:57:47,651 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 3609ad07831c,39733,1734371789085 2024-12-16T17:57:47,652 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=39733 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=80 2024-12-16T17:57:47,652 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-2 {event_type=RS_FLUSH_REGIONS, pid=80}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1734371848597.9edd584a765d2a226d81ae3095fa4916. 2024-12-16T17:57:47,652 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-2 {event_type=RS_FLUSH_REGIONS, pid=80}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1734371848597.9edd584a765d2a226d81ae3095fa4916. as already flushing 2024-12-16T17:57:47,652 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-2 {event_type=RS_FLUSH_REGIONS, pid=80}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1734371848597.9edd584a765d2a226d81ae3095fa4916. 2024-12-16T17:57:47,652 ERROR [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-2 {event_type=RS_FLUSH_REGIONS, pid=80}] handler.RSProcedureHandler(58): pid=80 java.io.IOException: Unable to complete flush {ENCODED => 9edd584a765d2a226d81ae3095fa4916, NAME => 'TestAcidGuarantees,,1734371848597.9edd584a765d2a226d81ae3095fa4916.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-16T17:57:47,652 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-2 {event_type=RS_FLUSH_REGIONS, pid=80}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=80 java.io.IOException: Unable to complete flush {ENCODED => 9edd584a765d2a226d81ae3095fa4916, NAME => 'TestAcidGuarantees,,1734371848597.9edd584a765d2a226d81ae3095fa4916.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-16T17:57:47,653 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38367 {}] master.HMaster(4114): Remote procedure failed, pid=80 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 9edd584a765d2a226d81ae3095fa4916, NAME => 'TestAcidGuarantees,,1734371848597.9edd584a765d2a226d81ae3095fa4916.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 9edd584a765d2a226d81ae3095fa4916, NAME => 'TestAcidGuarantees,,1734371848597.9edd584a765d2a226d81ae3095fa4916.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-16T17:57:47,790 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39733 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9edd584a765d2a226d81ae3095fa4916, server=3609ad07831c,39733,1734371789085 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-16T17:57:47,790 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39733 {}] ipc.CallRunner(138): callId: 230 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45454 deadline: 1734371927788, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9edd584a765d2a226d81ae3095fa4916, server=3609ad07831c,39733,1734371789085 2024-12-16T17:57:47,801 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38367 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=79 2024-12-16T17:57:47,804 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 3609ad07831c,39733,1734371789085 2024-12-16T17:57:47,804 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=39733 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=80 2024-12-16T17:57:47,805 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-0 {event_type=RS_FLUSH_REGIONS, pid=80}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1734371848597.9edd584a765d2a226d81ae3095fa4916. 2024-12-16T17:57:47,805 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-0 {event_type=RS_FLUSH_REGIONS, pid=80}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1734371848597.9edd584a765d2a226d81ae3095fa4916. as already flushing 2024-12-16T17:57:47,805 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-0 {event_type=RS_FLUSH_REGIONS, pid=80}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1734371848597.9edd584a765d2a226d81ae3095fa4916. 
2024-12-16T17:57:47,805 ERROR [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-0 {event_type=RS_FLUSH_REGIONS, pid=80}] handler.RSProcedureHandler(58): pid=80 java.io.IOException: Unable to complete flush {ENCODED => 9edd584a765d2a226d81ae3095fa4916, NAME => 'TestAcidGuarantees,,1734371848597.9edd584a765d2a226d81ae3095fa4916.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-16T17:57:47,805 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-0 {event_type=RS_FLUSH_REGIONS, pid=80}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=80 java.io.IOException: Unable to complete flush {ENCODED => 9edd584a765d2a226d81ae3095fa4916, NAME => 'TestAcidGuarantees,,1734371848597.9edd584a765d2a226d81ae3095fa4916.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-16T17:57:47,805 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38367 {}] master.HMaster(4114): Remote procedure failed, pid=80 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 9edd584a765d2a226d81ae3095fa4916, NAME => 'TestAcidGuarantees,,1734371848597.9edd584a765d2a226d81ae3095fa4916.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 9edd584a765d2a226d81ae3095fa4916, NAME => 'TestAcidGuarantees,,1734371848597.9edd584a765d2a226d81ae3095fa4916.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-16T17:57:47,845 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=17.89 KB at sequenceid=403 (bloomFilter=true), to=hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/9edd584a765d2a226d81ae3095fa4916/.tmp/A/f609b536080347aa8c03be0b56810506 2024-12-16T17:57:47,851 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/9edd584a765d2a226d81ae3095fa4916/.tmp/B/77b0b2b602954d7584fcb2fd70ea7de2 is 50, key is test_row_0/B:col10/1734371867410/Put/seqid=0 2024-12-16T17:57:47,854 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41817 is added to blk_1073742160_1336 (size=12301) 2024-12-16T17:57:47,956 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 3609ad07831c,39733,1734371789085 2024-12-16T17:57:47,957 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=39733 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=80 2024-12-16T17:57:47,957 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-1 {event_type=RS_FLUSH_REGIONS, pid=80}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1734371848597.9edd584a765d2a226d81ae3095fa4916. 2024-12-16T17:57:47,957 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-1 {event_type=RS_FLUSH_REGIONS, pid=80}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1734371848597.9edd584a765d2a226d81ae3095fa4916. 
as already flushing 2024-12-16T17:57:47,957 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-1 {event_type=RS_FLUSH_REGIONS, pid=80}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1734371848597.9edd584a765d2a226d81ae3095fa4916. 2024-12-16T17:57:47,957 ERROR [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-1 {event_type=RS_FLUSH_REGIONS, pid=80}] handler.RSProcedureHandler(58): pid=80 java.io.IOException: Unable to complete flush {ENCODED => 9edd584a765d2a226d81ae3095fa4916, NAME => 'TestAcidGuarantees,,1734371848597.9edd584a765d2a226d81ae3095fa4916.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-16T17:57:47,957 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-1 {event_type=RS_FLUSH_REGIONS, pid=80}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=80 java.io.IOException: Unable to complete flush {ENCODED => 9edd584a765d2a226d81ae3095fa4916, NAME => 'TestAcidGuarantees,,1734371848597.9edd584a765d2a226d81ae3095fa4916.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-16T17:57:47,958 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38367 {}] master.HMaster(4114): Remote procedure failed, pid=80 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 9edd584a765d2a226d81ae3095fa4916, NAME => 'TestAcidGuarantees,,1734371848597.9edd584a765d2a226d81ae3095fa4916.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] 
at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 9edd584a765d2a226d81ae3095fa4916, NAME => 'TestAcidGuarantees,,1734371848597.9edd584a765d2a226d81ae3095fa4916.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-16T17:57:48,093 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39733 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9edd584a765d2a226d81ae3095fa4916, server=3609ad07831c,39733,1734371789085 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-16T17:57:48,093 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39733 {}] ipc.CallRunner(138): callId: 232 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45454 deadline: 1734371928091, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9edd584a765d2a226d81ae3095fa4916, server=3609ad07831c,39733,1734371789085 2024-12-16T17:57:48,101 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38367 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=79 2024-12-16T17:57:48,109 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 3609ad07831c,39733,1734371789085 2024-12-16T17:57:48,110 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=39733 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=80 2024-12-16T17:57:48,110 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-2 {event_type=RS_FLUSH_REGIONS, pid=80}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1734371848597.9edd584a765d2a226d81ae3095fa4916. 2024-12-16T17:57:48,110 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-2 {event_type=RS_FLUSH_REGIONS, pid=80}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1734371848597.9edd584a765d2a226d81ae3095fa4916. as already flushing 2024-12-16T17:57:48,110 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-2 {event_type=RS_FLUSH_REGIONS, pid=80}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1734371848597.9edd584a765d2a226d81ae3095fa4916. 2024-12-16T17:57:48,110 ERROR [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-2 {event_type=RS_FLUSH_REGIONS, pid=80}] handler.RSProcedureHandler(58): pid=80 java.io.IOException: Unable to complete flush {ENCODED => 9edd584a765d2a226d81ae3095fa4916, NAME => 'TestAcidGuarantees,,1734371848597.9edd584a765d2a226d81ae3095fa4916.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-16T17:57:48,110 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-2 {event_type=RS_FLUSH_REGIONS, pid=80}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=80 java.io.IOException: Unable to complete flush {ENCODED => 9edd584a765d2a226d81ae3095fa4916, NAME => 'TestAcidGuarantees,,1734371848597.9edd584a765d2a226d81ae3095fa4916.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-16T17:57:48,110 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38367 {}] master.HMaster(4114): Remote procedure failed, pid=80 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 9edd584a765d2a226d81ae3095fa4916, NAME => 'TestAcidGuarantees,,1734371848597.9edd584a765d2a226d81ae3095fa4916.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 9edd584a765d2a226d81ae3095fa4916, NAME => 'TestAcidGuarantees,,1734371848597.9edd584a765d2a226d81ae3095fa4916.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-16T17:57:48,254 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=17.89 KB at sequenceid=403 (bloomFilter=true), to=hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/9edd584a765d2a226d81ae3095fa4916/.tmp/B/77b0b2b602954d7584fcb2fd70ea7de2 2024-12-16T17:57:48,260 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/9edd584a765d2a226d81ae3095fa4916/.tmp/C/1e141ca14e7345d895c239b13b126c01 is 50, key is test_row_0/C:col10/1734371867410/Put/seqid=0 2024-12-16T17:57:48,261 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 3609ad07831c,39733,1734371789085 2024-12-16T17:57:48,262 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=39733 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=80 2024-12-16T17:57:48,262 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-0 {event_type=RS_FLUSH_REGIONS, pid=80}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1734371848597.9edd584a765d2a226d81ae3095fa4916. 2024-12-16T17:57:48,262 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-0 {event_type=RS_FLUSH_REGIONS, pid=80}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1734371848597.9edd584a765d2a226d81ae3095fa4916. as already flushing 2024-12-16T17:57:48,262 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-0 {event_type=RS_FLUSH_REGIONS, pid=80}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1734371848597.9edd584a765d2a226d81ae3095fa4916. 2024-12-16T17:57:48,262 ERROR [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-0 {event_type=RS_FLUSH_REGIONS, pid=80}] handler.RSProcedureHandler(58): pid=80 java.io.IOException: Unable to complete flush {ENCODED => 9edd584a765d2a226d81ae3095fa4916, NAME => 'TestAcidGuarantees,,1734371848597.9edd584a765d2a226d81ae3095fa4916.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-16T17:57:48,262 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-0 {event_type=RS_FLUSH_REGIONS, pid=80}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=80 java.io.IOException: Unable to complete flush {ENCODED => 9edd584a765d2a226d81ae3095fa4916, NAME => 'TestAcidGuarantees,,1734371848597.9edd584a765d2a226d81ae3095fa4916.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-16T17:57:48,262 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38367 {}] master.HMaster(4114): Remote procedure failed, pid=80 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 9edd584a765d2a226d81ae3095fa4916, NAME => 'TestAcidGuarantees,,1734371848597.9edd584a765d2a226d81ae3095fa4916.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 9edd584a765d2a226d81ae3095fa4916, NAME => 'TestAcidGuarantees,,1734371848597.9edd584a765d2a226d81ae3095fa4916.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-16T17:57:48,272 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41817 is added to blk_1073742161_1337 (size=12301) 2024-12-16T17:57:48,413 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 3609ad07831c,39733,1734371789085 2024-12-16T17:57:48,414 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=39733 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=80 2024-12-16T17:57:48,414 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-1 {event_type=RS_FLUSH_REGIONS, pid=80}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1734371848597.9edd584a765d2a226d81ae3095fa4916. 2024-12-16T17:57:48,414 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-1 {event_type=RS_FLUSH_REGIONS, pid=80}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1734371848597.9edd584a765d2a226d81ae3095fa4916. as already flushing 2024-12-16T17:57:48,414 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-1 {event_type=RS_FLUSH_REGIONS, pid=80}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1734371848597.9edd584a765d2a226d81ae3095fa4916. 2024-12-16T17:57:48,414 ERROR [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-1 {event_type=RS_FLUSH_REGIONS, pid=80}] handler.RSProcedureHandler(58): pid=80 java.io.IOException: Unable to complete flush {ENCODED => 9edd584a765d2a226d81ae3095fa4916, NAME => 'TestAcidGuarantees,,1734371848597.9edd584a765d2a226d81ae3095fa4916.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-16T17:57:48,414 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-1 {event_type=RS_FLUSH_REGIONS, pid=80}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=80 java.io.IOException: Unable to complete flush {ENCODED => 9edd584a765d2a226d81ae3095fa4916, NAME => 'TestAcidGuarantees,,1734371848597.9edd584a765d2a226d81ae3095fa4916.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-16T17:57:48,415 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38367 {}] master.HMaster(4114): Remote procedure failed, pid=80 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 9edd584a765d2a226d81ae3095fa4916, NAME => 'TestAcidGuarantees,,1734371848597.9edd584a765d2a226d81ae3095fa4916.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 9edd584a765d2a226d81ae3095fa4916, NAME => 'TestAcidGuarantees,,1734371848597.9edd584a765d2a226d81ae3095fa4916.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-16T17:57:48,566 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 3609ad07831c,39733,1734371789085 2024-12-16T17:57:48,566 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=39733 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=80 2024-12-16T17:57:48,566 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-2 {event_type=RS_FLUSH_REGIONS, pid=80}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1734371848597.9edd584a765d2a226d81ae3095fa4916. 2024-12-16T17:57:48,566 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-2 {event_type=RS_FLUSH_REGIONS, pid=80}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1734371848597.9edd584a765d2a226d81ae3095fa4916. as already flushing 2024-12-16T17:57:48,566 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-2 {event_type=RS_FLUSH_REGIONS, pid=80}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1734371848597.9edd584a765d2a226d81ae3095fa4916. 2024-12-16T17:57:48,566 ERROR [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-2 {event_type=RS_FLUSH_REGIONS, pid=80}] handler.RSProcedureHandler(58): pid=80 java.io.IOException: Unable to complete flush {ENCODED => 9edd584a765d2a226d81ae3095fa4916, NAME => 'TestAcidGuarantees,,1734371848597.9edd584a765d2a226d81ae3095fa4916.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-16T17:57:48,567 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-2 {event_type=RS_FLUSH_REGIONS, pid=80}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=80 java.io.IOException: Unable to complete flush {ENCODED => 9edd584a765d2a226d81ae3095fa4916, NAME => 'TestAcidGuarantees,,1734371848597.9edd584a765d2a226d81ae3095fa4916.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-16T17:57:48,567 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38367 {}] master.HMaster(4114): Remote procedure failed, pid=80 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 9edd584a765d2a226d81ae3095fa4916, NAME => 'TestAcidGuarantees,,1734371848597.9edd584a765d2a226d81ae3095fa4916.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 9edd584a765d2a226d81ae3095fa4916, NAME => 'TestAcidGuarantees,,1734371848597.9edd584a765d2a226d81ae3095fa4916.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-16T17:57:48,595 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39733 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9edd584a765d2a226d81ae3095fa4916, server=3609ad07831c,39733,1734371789085 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-16T17:57:48,595 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39733 {}] ipc.CallRunner(138): callId: 234 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45454 deadline: 1734371928594, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9edd584a765d2a226d81ae3095fa4916, server=3609ad07831c,39733,1734371789085 2024-12-16T17:57:48,602 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38367 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=79 2024-12-16T17:57:48,673 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=17.89 KB at sequenceid=403 (bloomFilter=true), to=hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/9edd584a765d2a226d81ae3095fa4916/.tmp/C/1e141ca14e7345d895c239b13b126c01 2024-12-16T17:57:48,677 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/9edd584a765d2a226d81ae3095fa4916/.tmp/A/f609b536080347aa8c03be0b56810506 as hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/9edd584a765d2a226d81ae3095fa4916/A/f609b536080347aa8c03be0b56810506 2024-12-16T17:57:48,681 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/9edd584a765d2a226d81ae3095fa4916/A/f609b536080347aa8c03be0b56810506, entries=150, sequenceid=403, filesize=12.0 K 2024-12-16T17:57:48,682 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/9edd584a765d2a226d81ae3095fa4916/.tmp/B/77b0b2b602954d7584fcb2fd70ea7de2 as hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/9edd584a765d2a226d81ae3095fa4916/B/77b0b2b602954d7584fcb2fd70ea7de2 2024-12-16T17:57:48,696 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/9edd584a765d2a226d81ae3095fa4916/B/77b0b2b602954d7584fcb2fd70ea7de2, entries=150, sequenceid=403, filesize=12.0 K 2024-12-16T17:57:48,697 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/9edd584a765d2a226d81ae3095fa4916/.tmp/C/1e141ca14e7345d895c239b13b126c01 as 
hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/9edd584a765d2a226d81ae3095fa4916/C/1e141ca14e7345d895c239b13b126c01 2024-12-16T17:57:48,703 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/9edd584a765d2a226d81ae3095fa4916/C/1e141ca14e7345d895c239b13b126c01, entries=150, sequenceid=403, filesize=12.0 K 2024-12-16T17:57:48,704 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~53.67 KB/54960, heapSize ~141.33 KB/144720, currentSize=147.60 KB/151140 for 9edd584a765d2a226d81ae3095fa4916 in 1286ms, sequenceid=403, compaction requested=true 2024-12-16T17:57:48,704 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 9edd584a765d2a226d81ae3095fa4916: 2024-12-16T17:57:48,704 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 9edd584a765d2a226d81ae3095fa4916:A, priority=-2147483648, current under compaction store size is 1 2024-12-16T17:57:48,704 DEBUG [RS:0;3609ad07831c:39733-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 4 store files, 0 compacting, 4 eligible, 16 blocking 2024-12-16T17:57:48,704 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-16T17:57:48,704 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 9edd584a765d2a226d81ae3095fa4916:B, priority=-2147483648, current under compaction store size is 2 2024-12-16T17:57:48,704 DEBUG [RS:0;3609ad07831c:39733-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 4 store files, 0 compacting, 4 eligible, 16 blocking 2024-12-16T17:57:48,704 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-16T17:57:48,704 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 9edd584a765d2a226d81ae3095fa4916:C, priority=-2147483648, current under compaction store size is 3 2024-12-16T17:57:48,704 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-16T17:57:48,705 DEBUG [RS:0;3609ad07831c:39733-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 4 files of size 47578 starting at candidate #0 after considering 3 permutations with 3 in ratio 2024-12-16T17:57:48,705 DEBUG [RS:0;3609ad07831c:39733-shortCompactions-0 {}] regionserver.HStore(1540): 9edd584a765d2a226d81ae3095fa4916/A is initiating minor compaction (all files) 2024-12-16T17:57:48,706 INFO [RS:0;3609ad07831c:39733-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 9edd584a765d2a226d81ae3095fa4916/A in TestAcidGuarantees,,1734371848597.9edd584a765d2a226d81ae3095fa4916. 
2024-12-16T17:57:48,706 DEBUG [RS:0;3609ad07831c:39733-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 4 files of size 47578 starting at candidate #0 after considering 3 permutations with 3 in ratio 2024-12-16T17:57:48,706 DEBUG [RS:0;3609ad07831c:39733-longCompactions-0 {}] regionserver.HStore(1540): 9edd584a765d2a226d81ae3095fa4916/B is initiating minor compaction (all files) 2024-12-16T17:57:48,706 INFO [RS:0;3609ad07831c:39733-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 9edd584a765d2a226d81ae3095fa4916/B in TestAcidGuarantees,,1734371848597.9edd584a765d2a226d81ae3095fa4916. 2024-12-16T17:57:48,706 INFO [RS:0;3609ad07831c:39733-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/9edd584a765d2a226d81ae3095fa4916/A/9a2426b80f9446ddac577f0165208c42, hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/9edd584a765d2a226d81ae3095fa4916/A/d00f32e57edb4ad88375f6de7b405f7b, hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/9edd584a765d2a226d81ae3095fa4916/A/c30b1fbfbdd54b768c0879ce8f661472, hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/9edd584a765d2a226d81ae3095fa4916/A/f609b536080347aa8c03be0b56810506] into tmpdir=hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/9edd584a765d2a226d81ae3095fa4916/.tmp, totalSize=46.5 K 2024-12-16T17:57:48,706 INFO [RS:0;3609ad07831c:39733-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/9edd584a765d2a226d81ae3095fa4916/B/c511b37367cf4bbe982f3927cd73511e, hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/9edd584a765d2a226d81ae3095fa4916/B/598dba38cc67414bb2bc2a0f6de7b333, hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/9edd584a765d2a226d81ae3095fa4916/B/320b6bc898a54e5996b38d884cf046e6, hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/9edd584a765d2a226d81ae3095fa4916/B/77b0b2b602954d7584fcb2fd70ea7de2] into tmpdir=hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/9edd584a765d2a226d81ae3095fa4916/.tmp, totalSize=46.5 K 2024-12-16T17:57:48,706 DEBUG [RS:0;3609ad07831c:39733-shortCompactions-0 {}] compactions.Compactor(224): Compacting 9a2426b80f9446ddac577f0165208c42, keycount=150, bloomtype=ROW, size=12.8 K, encoding=NONE, compression=NONE, seqNum=353, earliestPutTs=1734371864199 2024-12-16T17:57:48,706 DEBUG [RS:0;3609ad07831c:39733-longCompactions-0 {}] compactions.Compactor(224): Compacting c511b37367cf4bbe982f3927cd73511e, keycount=150, bloomtype=ROW, size=12.8 K, encoding=NONE, compression=NONE, seqNum=353, earliestPutTs=1734371864199 2024-12-16T17:57:48,707 DEBUG [RS:0;3609ad07831c:39733-longCompactions-0 {}] compactions.Compactor(224): Compacting 598dba38cc67414bb2bc2a0f6de7b333, keycount=100, bloomtype=ROW, size=9.6 K, encoding=NONE, compression=NONE, seqNum=360, 
earliestPutTs=1734371865374 2024-12-16T17:57:48,707 DEBUG [RS:0;3609ad07831c:39733-shortCompactions-0 {}] compactions.Compactor(224): Compacting d00f32e57edb4ad88375f6de7b405f7b, keycount=100, bloomtype=ROW, size=9.6 K, encoding=NONE, compression=NONE, seqNum=360, earliestPutTs=1734371865374 2024-12-16T17:57:48,707 DEBUG [RS:0;3609ad07831c:39733-longCompactions-0 {}] compactions.Compactor(224): Compacting 320b6bc898a54e5996b38d884cf046e6, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=392, earliestPutTs=1734371866459 2024-12-16T17:57:48,707 DEBUG [RS:0;3609ad07831c:39733-shortCompactions-0 {}] compactions.Compactor(224): Compacting c30b1fbfbdd54b768c0879ce8f661472, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=392, earliestPutTs=1734371866459 2024-12-16T17:57:48,707 DEBUG [RS:0;3609ad07831c:39733-shortCompactions-0 {}] compactions.Compactor(224): Compacting f609b536080347aa8c03be0b56810506, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=403, earliestPutTs=1734371867410 2024-12-16T17:57:48,707 DEBUG [RS:0;3609ad07831c:39733-longCompactions-0 {}] compactions.Compactor(224): Compacting 77b0b2b602954d7584fcb2fd70ea7de2, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=403, earliestPutTs=1734371867410 2024-12-16T17:57:48,716 INFO [RS:0;3609ad07831c:39733-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 9edd584a765d2a226d81ae3095fa4916#A#compaction#291 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-12-16T17:57:48,717 DEBUG [RS:0;3609ad07831c:39733-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/9edd584a765d2a226d81ae3095fa4916/.tmp/A/eb24edd01c624651b211233fd9c49596 is 50, key is test_row_0/A:col10/1734371867410/Put/seqid=0 2024-12-16T17:57:48,718 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 3609ad07831c,39733,1734371789085 2024-12-16T17:57:48,720 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=39733 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=80 2024-12-16T17:57:48,721 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-0 {event_type=RS_FLUSH_REGIONS, pid=80}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1734371848597.9edd584a765d2a226d81ae3095fa4916. 
2024-12-16T17:57:48,721 INFO [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-0 {event_type=RS_FLUSH_REGIONS, pid=80}] regionserver.HRegion(2837): Flushing 9edd584a765d2a226d81ae3095fa4916 3/3 column families, dataSize=147.60 KB heapSize=387.47 KB 2024-12-16T17:57:48,721 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-0 {event_type=RS_FLUSH_REGIONS, pid=80}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 9edd584a765d2a226d81ae3095fa4916, store=A 2024-12-16T17:57:48,721 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-0 {event_type=RS_FLUSH_REGIONS, pid=80}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-16T17:57:48,721 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-0 {event_type=RS_FLUSH_REGIONS, pid=80}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 9edd584a765d2a226d81ae3095fa4916, store=B 2024-12-16T17:57:48,721 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-0 {event_type=RS_FLUSH_REGIONS, pid=80}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-16T17:57:48,721 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-0 {event_type=RS_FLUSH_REGIONS, pid=80}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 9edd584a765d2a226d81ae3095fa4916, store=C 2024-12-16T17:57:48,721 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-0 {event_type=RS_FLUSH_REGIONS, pid=80}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-16T17:57:48,727 INFO [RS:0;3609ad07831c:39733-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 9edd584a765d2a226d81ae3095fa4916#B#compaction#292 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-12-16T17:57:48,727 DEBUG [RS:0;3609ad07831c:39733-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/9edd584a765d2a226d81ae3095fa4916/.tmp/B/8c86dbe3469645c09f220c097d286dc1 is 50, key is test_row_0/B:col10/1734371867410/Put/seqid=0 2024-12-16T17:57:48,727 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-0 {event_type=RS_FLUSH_REGIONS, pid=80}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/9edd584a765d2a226d81ae3095fa4916/.tmp/A/736b467ac0754b0b93586a33185cea59 is 50, key is test_row_0/A:col10/1734371867482/Put/seqid=0 2024-12-16T17:57:48,738 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41817 is added to blk_1073742162_1338 (size=13255) 2024-12-16T17:57:48,747 DEBUG [RS:0;3609ad07831c:39733-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/9edd584a765d2a226d81ae3095fa4916/.tmp/A/eb24edd01c624651b211233fd9c49596 as hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/9edd584a765d2a226d81ae3095fa4916/A/eb24edd01c624651b211233fd9c49596 2024-12-16T17:57:48,753 INFO [RS:0;3609ad07831c:39733-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 4 (all) file(s) in 9edd584a765d2a226d81ae3095fa4916/A of 9edd584a765d2a226d81ae3095fa4916 into eb24edd01c624651b211233fd9c49596(size=12.9 K), total size for store is 12.9 K. This selection was in queue for 0sec, and took 0sec to execute. 
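The flush entries above cycle through the three column families (store=A, B, C) of the single TestAcidGuarantees region. The following is a hypothetical setup sketch that reproduces just that family layout with the standard Admin API; the real test harness also enables extras such as the CompactingMemStore ("FLUSHING TO DISK") visible in these entries, which this sketch does not attempt.

```java
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.TableDescriptor;
import org.apache.hadoop.hbase.client.TableDescriptorBuilder;

public class CreateTestTable {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    try (Connection conn = ConnectionFactory.createConnection(conf);
         Admin admin = conn.getAdmin()) {
      // Single-region table with the three column families seen in the flush lines above.
      TableDescriptor desc = TableDescriptorBuilder
          .newBuilder(TableName.valueOf("TestAcidGuarantees"))
          .setColumnFamily(ColumnFamilyDescriptorBuilder.of("A"))
          .setColumnFamily(ColumnFamilyDescriptorBuilder.of("B"))
          .setColumnFamily(ColumnFamilyDescriptorBuilder.of("C"))
          .build();
      admin.createTable(desc);
    }
  }
}
```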
2024-12-16T17:57:48,753 DEBUG [RS:0;3609ad07831c:39733-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 9edd584a765d2a226d81ae3095fa4916: 2024-12-16T17:57:48,754 INFO [RS:0;3609ad07831c:39733-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1734371848597.9edd584a765d2a226d81ae3095fa4916., storeName=9edd584a765d2a226d81ae3095fa4916/A, priority=12, startTime=1734371868704; duration=0sec 2024-12-16T17:57:48,754 DEBUG [RS:0;3609ad07831c:39733-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-16T17:57:48,754 DEBUG [RS:0;3609ad07831c:39733-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 9edd584a765d2a226d81ae3095fa4916:A 2024-12-16T17:57:48,754 DEBUG [RS:0;3609ad07831c:39733-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 4 store files, 0 compacting, 4 eligible, 16 blocking 2024-12-16T17:57:48,760 DEBUG [RS:0;3609ad07831c:39733-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 4 files of size 47578 starting at candidate #0 after considering 3 permutations with 3 in ratio 2024-12-16T17:57:48,761 DEBUG [RS:0;3609ad07831c:39733-shortCompactions-0 {}] regionserver.HStore(1540): 9edd584a765d2a226d81ae3095fa4916/C is initiating minor compaction (all files) 2024-12-16T17:57:48,761 INFO [RS:0;3609ad07831c:39733-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 9edd584a765d2a226d81ae3095fa4916/C in TestAcidGuarantees,,1734371848597.9edd584a765d2a226d81ae3095fa4916. 2024-12-16T17:57:48,761 INFO [RS:0;3609ad07831c:39733-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/9edd584a765d2a226d81ae3095fa4916/C/9d0acdf8e57f416c8a8a682de34b59a9, hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/9edd584a765d2a226d81ae3095fa4916/C/360ffb32fb814e7a826ea82632384f7c, hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/9edd584a765d2a226d81ae3095fa4916/C/b4481327c4464533ae1db1bef32d992b, hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/9edd584a765d2a226d81ae3095fa4916/C/1e141ca14e7345d895c239b13b126c01] into tmpdir=hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/9edd584a765d2a226d81ae3095fa4916/.tmp, totalSize=46.5 K 2024-12-16T17:57:48,761 DEBUG [RS:0;3609ad07831c:39733-shortCompactions-0 {}] compactions.Compactor(224): Compacting 9d0acdf8e57f416c8a8a682de34b59a9, keycount=150, bloomtype=ROW, size=12.8 K, encoding=NONE, compression=NONE, seqNum=353, earliestPutTs=1734371864199 2024-12-16T17:57:48,762 DEBUG [RS:0;3609ad07831c:39733-shortCompactions-0 {}] compactions.Compactor(224): Compacting 360ffb32fb814e7a826ea82632384f7c, keycount=100, bloomtype=ROW, size=9.6 K, encoding=NONE, compression=NONE, seqNum=360, earliestPutTs=1734371865374 2024-12-16T17:57:48,762 DEBUG [RS:0;3609ad07831c:39733-shortCompactions-0 {}] compactions.Compactor(224): Compacting b4481327c4464533ae1db1bef32d992b, keycount=150, bloomtype=ROW, size=12.0 K, 
encoding=NONE, compression=NONE, seqNum=392, earliestPutTs=1734371866459 2024-12-16T17:57:48,762 DEBUG [RS:0;3609ad07831c:39733-shortCompactions-0 {}] compactions.Compactor(224): Compacting 1e141ca14e7345d895c239b13b126c01, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=403, earliestPutTs=1734371867410 2024-12-16T17:57:48,770 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41817 is added to blk_1073742163_1339 (size=13255) 2024-12-16T17:57:48,773 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41817 is added to blk_1073742164_1340 (size=12301) 2024-12-16T17:57:48,774 INFO [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-0 {event_type=RS_FLUSH_REGIONS, pid=80}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=49.20 KB at sequenceid=428 (bloomFilter=true), to=hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/9edd584a765d2a226d81ae3095fa4916/.tmp/A/736b467ac0754b0b93586a33185cea59 2024-12-16T17:57:48,777 INFO [RS:0;3609ad07831c:39733-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 9edd584a765d2a226d81ae3095fa4916#C#compaction#294 average throughput is unlimited, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-12-16T17:57:48,778 DEBUG [RS:0;3609ad07831c:39733-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/9edd584a765d2a226d81ae3095fa4916/.tmp/C/1f0688c42e8a4994aede07484759022b is 50, key is test_row_0/C:col10/1734371867410/Put/seqid=0 2024-12-16T17:57:48,782 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-0 {event_type=RS_FLUSH_REGIONS, pid=80}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/9edd584a765d2a226d81ae3095fa4916/.tmp/B/5453782c29284a68b09cd7a38660de44 is 50, key is test_row_0/B:col10/1734371867482/Put/seqid=0 2024-12-16T17:57:48,783 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41817 is added to blk_1073742165_1341 (size=13255) 2024-12-16T17:57:48,787 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41817 is added to blk_1073742166_1342 (size=12301) 2024-12-16T17:57:48,790 DEBUG [RS:0;3609ad07831c:39733-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/9edd584a765d2a226d81ae3095fa4916/.tmp/C/1f0688c42e8a4994aede07484759022b as hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/9edd584a765d2a226d81ae3095fa4916/C/1f0688c42e8a4994aede07484759022b 2024-12-16T17:57:48,795 INFO [RS:0;3609ad07831c:39733-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 4 (all) file(s) in 9edd584a765d2a226d81ae3095fa4916/C of 9edd584a765d2a226d81ae3095fa4916 into 1f0688c42e8a4994aede07484759022b(size=12.9 K), total size for store is 12.9 K. This selection was in queue for 0sec, and took 0sec to execute. 
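The compaction selections above ("Selecting compaction from 4 store files", "Exploring compaction algorithm has selected 4 files of size 47578") are driven by a size-ratio test: a candidate set qualifies only if no single file is larger than hbase.hstore.compaction.ratio (1.2 by default) times the combined size of the other candidates. The snippet below is a simplified illustration of that check, not the actual ExploringCompactionPolicy code; the byte sizes are rough conversions of the 12.8 K / 9.6 K / 12.0 K / 12.0 K files listed above.

```java
import java.util.Arrays;
import java.util.List;

public class RatioCheck {
  // Simplified version of the "files in ratio" test used when picking store files to
  // compact: no single candidate may exceed ratio * (sum of the other candidates).
  static boolean filesInRatio(List<Long> sizes, double ratio) {
    long total = sizes.stream().mapToLong(Long::longValue).sum();
    for (long size : sizes) {
      if (size > ratio * (total - size)) {
        return false;
      }
    }
    return true;
  }

  public static void main(String[] args) {
    // Approximate byte sizes of the four store files selected above (~46.5 K total).
    List<Long> sizes = Arrays.asList(13_107L, 9_830L, 12_288L, 12_288L);
    System.out.println(filesInRatio(sizes, 1.2)); // true: all four files qualify
  }
}
```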
2024-12-16T17:57:48,795 DEBUG [RS:0;3609ad07831c:39733-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 9edd584a765d2a226d81ae3095fa4916: 2024-12-16T17:57:48,795 INFO [RS:0;3609ad07831c:39733-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1734371848597.9edd584a765d2a226d81ae3095fa4916., storeName=9edd584a765d2a226d81ae3095fa4916/C, priority=12, startTime=1734371868704; duration=0sec 2024-12-16T17:57:48,796 DEBUG [RS:0;3609ad07831c:39733-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-16T17:57:48,796 DEBUG [RS:0;3609ad07831c:39733-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 9edd584a765d2a226d81ae3095fa4916:C 2024-12-16T17:57:49,174 DEBUG [RS:0;3609ad07831c:39733-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/9edd584a765d2a226d81ae3095fa4916/.tmp/B/8c86dbe3469645c09f220c097d286dc1 as hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/9edd584a765d2a226d81ae3095fa4916/B/8c86dbe3469645c09f220c097d286dc1 2024-12-16T17:57:49,178 INFO [RS:0;3609ad07831c:39733-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 4 (all) file(s) in 9edd584a765d2a226d81ae3095fa4916/B of 9edd584a765d2a226d81ae3095fa4916 into 8c86dbe3469645c09f220c097d286dc1(size=12.9 K), total size for store is 12.9 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-12-16T17:57:49,178 DEBUG [RS:0;3609ad07831c:39733-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 9edd584a765d2a226d81ae3095fa4916: 2024-12-16T17:57:49,178 INFO [RS:0;3609ad07831c:39733-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1734371848597.9edd584a765d2a226d81ae3095fa4916., storeName=9edd584a765d2a226d81ae3095fa4916/B, priority=12, startTime=1734371868704; duration=0sec 2024-12-16T17:57:49,178 DEBUG [RS:0;3609ad07831c:39733-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-16T17:57:49,178 DEBUG [RS:0;3609ad07831c:39733-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 9edd584a765d2a226d81ae3095fa4916:B 2024-12-16T17:57:49,188 INFO [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-0 {event_type=RS_FLUSH_REGIONS, pid=80}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=49.20 KB at sequenceid=428 (bloomFilter=true), to=hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/9edd584a765d2a226d81ae3095fa4916/.tmp/B/5453782c29284a68b09cd7a38660de44 2024-12-16T17:57:49,195 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-0 {event_type=RS_FLUSH_REGIONS, pid=80}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/9edd584a765d2a226d81ae3095fa4916/.tmp/C/e045a9b92f6840c78251007a23e9ae48 is 50, key is test_row_0/C:col10/1734371867482/Put/seqid=0 2024-12-16T17:57:49,198 INFO [Block report processor {}] 
blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41817 is added to blk_1073742167_1343 (size=12301) 2024-12-16T17:57:49,201 INFO [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-0 {event_type=RS_FLUSH_REGIONS, pid=80}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=49.20 KB at sequenceid=428 (bloomFilter=true), to=hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/9edd584a765d2a226d81ae3095fa4916/.tmp/C/e045a9b92f6840c78251007a23e9ae48 2024-12-16T17:57:49,205 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-0 {event_type=RS_FLUSH_REGIONS, pid=80}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/9edd584a765d2a226d81ae3095fa4916/.tmp/A/736b467ac0754b0b93586a33185cea59 as hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/9edd584a765d2a226d81ae3095fa4916/A/736b467ac0754b0b93586a33185cea59 2024-12-16T17:57:49,209 INFO [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-0 {event_type=RS_FLUSH_REGIONS, pid=80}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/9edd584a765d2a226d81ae3095fa4916/A/736b467ac0754b0b93586a33185cea59, entries=150, sequenceid=428, filesize=12.0 K 2024-12-16T17:57:49,210 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-0 {event_type=RS_FLUSH_REGIONS, pid=80}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/9edd584a765d2a226d81ae3095fa4916/.tmp/B/5453782c29284a68b09cd7a38660de44 as hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/9edd584a765d2a226d81ae3095fa4916/B/5453782c29284a68b09cd7a38660de44 2024-12-16T17:57:49,214 INFO [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-0 {event_type=RS_FLUSH_REGIONS, pid=80}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/9edd584a765d2a226d81ae3095fa4916/B/5453782c29284a68b09cd7a38660de44, entries=150, sequenceid=428, filesize=12.0 K 2024-12-16T17:57:49,215 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-0 {event_type=RS_FLUSH_REGIONS, pid=80}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/9edd584a765d2a226d81ae3095fa4916/.tmp/C/e045a9b92f6840c78251007a23e9ae48 as hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/9edd584a765d2a226d81ae3095fa4916/C/e045a9b92f6840c78251007a23e9ae48 2024-12-16T17:57:49,219 INFO [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-0 {event_type=RS_FLUSH_REGIONS, pid=80}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/9edd584a765d2a226d81ae3095fa4916/C/e045a9b92f6840c78251007a23e9ae48, entries=150, sequenceid=428, filesize=12.0 K 2024-12-16T17:57:49,219 INFO [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-0 {event_type=RS_FLUSH_REGIONS, pid=80}] regionserver.HRegion(3040): Finished flush of dataSize ~147.60 KB/151140, 
heapSize ~387.42 KB/396720, currentSize=0 B/0 for 9edd584a765d2a226d81ae3095fa4916 in 498ms, sequenceid=428, compaction requested=false 2024-12-16T17:57:49,219 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-0 {event_type=RS_FLUSH_REGIONS, pid=80}] regionserver.HRegion(2538): Flush status journal for 9edd584a765d2a226d81ae3095fa4916: 2024-12-16T17:57:49,219 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-0 {event_type=RS_FLUSH_REGIONS, pid=80}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1734371848597.9edd584a765d2a226d81ae3095fa4916. 2024-12-16T17:57:49,220 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-0 {event_type=RS_FLUSH_REGIONS, pid=80}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=80 2024-12-16T17:57:49,220 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38367 {}] master.HMaster(4106): Remote procedure done, pid=80 2024-12-16T17:57:49,221 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=80, resume processing ppid=79 2024-12-16T17:57:49,221 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=80, ppid=79, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 1.7200 sec 2024-12-16T17:57:49,222 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=79, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=79, table=TestAcidGuarantees in 1.7240 sec 2024-12-16T17:57:49,512 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39733 {}] regionserver.HRegion(8581): Flush requested on 9edd584a765d2a226d81ae3095fa4916 2024-12-16T17:57:49,512 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 9edd584a765d2a226d81ae3095fa4916 3/3 column families, dataSize=53.67 KB heapSize=141.38 KB 2024-12-16T17:57:49,512 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 9edd584a765d2a226d81ae3095fa4916, store=A 2024-12-16T17:57:49,512 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-16T17:57:49,512 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 9edd584a765d2a226d81ae3095fa4916, store=B 2024-12-16T17:57:49,512 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-16T17:57:49,512 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 9edd584a765d2a226d81ae3095fa4916, store=C 2024-12-16T17:57:49,512 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-16T17:57:49,516 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/9edd584a765d2a226d81ae3095fa4916/.tmp/A/c0c9239186c6495bad41b07dfb8f5342 is 50, key is test_row_0/A:col10/1734371869509/Put/seqid=0 2024-12-16T17:57:49,530 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41817 is added to blk_1073742168_1344 (size=12301) 2024-12-16T17:57:49,564 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39733 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9edd584a765d2a226d81ae3095fa4916, server=3609ad07831c,39733,1734371789085 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-16T17:57:49,564 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39733 {}] ipc.CallRunner(138): callId: 168 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45484 deadline: 1734371929563, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9edd584a765d2a226d81ae3095fa4916, server=3609ad07831c,39733,1734371789085 2024-12-16T17:57:49,564 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39733 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9edd584a765d2a226d81ae3095fa4916, server=3609ad07831c,39733,1734371789085 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-16T17:57:49,564 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39733 {}] ipc.CallRunner(138): callId: 180 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45486 deadline: 1734371929563, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9edd584a765d2a226d81ae3095fa4916, server=3609ad07831c,39733,1734371789085 2024-12-16T17:57:49,599 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39733 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9edd584a765d2a226d81ae3095fa4916, server=3609ad07831c,39733,1734371789085 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-16T17:57:49,599 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39733 {}] ipc.CallRunner(138): callId: 236 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45454 deadline: 1734371929598, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9edd584a765d2a226d81ae3095fa4916, server=3609ad07831c,39733,1734371789085 2024-12-16T17:57:49,603 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38367 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=79 2024-12-16T17:57:49,603 INFO [Thread-1171 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 79 completed 2024-12-16T17:57:49,604 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38367 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-12-16T17:57:49,608 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38367 {}] procedure2.ProcedureExecutor(1098): Stored pid=81, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=81, table=TestAcidGuarantees 2024-12-16T17:57:49,609 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38367 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=81 2024-12-16T17:57:49,609 INFO [PEWorker-4 {}] procedure.FlushTableProcedure(91): pid=81, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=81, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-12-16T17:57:49,610 INFO [PEWorker-4 {}] procedure.FlushTableProcedure(91): pid=81, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=81, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-12-16T17:57:49,610 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=82, ppid=81, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-12-16T17:57:49,666 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39733 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9edd584a765d2a226d81ae3095fa4916, server=3609ad07831c,39733,1734371789085 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-16T17:57:49,666 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39733 {}] ipc.CallRunner(138): callId: 170 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45484 deadline: 1734371929665, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9edd584a765d2a226d81ae3095fa4916, server=3609ad07831c,39733,1734371789085 2024-12-16T17:57:49,666 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39733 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9edd584a765d2a226d81ae3095fa4916, server=3609ad07831c,39733,1734371789085 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-16T17:57:49,666 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39733 {}] ipc.CallRunner(138): callId: 182 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45486 deadline: 1734371929665, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9edd584a765d2a226d81ae3095fa4916, server=3609ad07831c,39733,1734371789085 2024-12-16T17:57:49,709 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38367 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=81 2024-12-16T17:57:49,762 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 3609ad07831c,39733,1734371789085 2024-12-16T17:57:49,762 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=39733 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=82 2024-12-16T17:57:49,762 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-1 {event_type=RS_FLUSH_REGIONS, pid=82}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1734371848597.9edd584a765d2a226d81ae3095fa4916. 2024-12-16T17:57:49,762 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-1 {event_type=RS_FLUSH_REGIONS, pid=82}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1734371848597.9edd584a765d2a226d81ae3095fa4916. as already flushing 2024-12-16T17:57:49,762 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-1 {event_type=RS_FLUSH_REGIONS, pid=82}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1734371848597.9edd584a765d2a226d81ae3095fa4916. 2024-12-16T17:57:49,762 ERROR [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-1 {event_type=RS_FLUSH_REGIONS, pid=82}] handler.RSProcedureHandler(58): pid=82 java.io.IOException: Unable to complete flush {ENCODED => 9edd584a765d2a226d81ae3095fa4916, NAME => 'TestAcidGuarantees,,1734371848597.9edd584a765d2a226d81ae3095fa4916.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
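The RegionTooBusyException stack traces above are returned to the client on Mutate calls while the region sits over its memstore limit. The stock HBase client already retries such calls internally (and may surface the failure wrapped in a retries-exhausted exception rather than directly), so the sketch below only illustrates the general shape of an explicit put-with-backoff; table, row, and column names mirror the log, while the attempt count and backoff are arbitrary.

```java
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.RegionTooBusyException;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.util.Bytes;

public class BusyRegionPut {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    try (Connection conn = ConnectionFactory.createConnection(conf);
         Table table = conn.getTable(TableName.valueOf("TestAcidGuarantees"))) {
      Put put = new Put(Bytes.toBytes("test_row_0"));
      put.addColumn(Bytes.toBytes("A"), Bytes.toBytes("col10"), Bytes.toBytes("value"));

      // Explicit retry with backoff when the region reports it is over its memstore limit.
      for (int attempt = 1; ; attempt++) {
        try {
          table.put(put);
          break;
        } catch (RegionTooBusyException e) {
          if (attempt >= 5) {
            throw e;
          }
          Thread.sleep(100L * attempt); // arbitrary linear backoff
        }
      }
    }
  }
}
```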
2024-12-16T17:57:49,762 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-1 {event_type=RS_FLUSH_REGIONS, pid=82}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=82 java.io.IOException: Unable to complete flush {ENCODED => 9edd584a765d2a226d81ae3095fa4916, NAME => 'TestAcidGuarantees,,1734371848597.9edd584a765d2a226d81ae3095fa4916.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-16T17:57:49,763 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38367 {}] master.HMaster(4114): Remote procedure failed, pid=82 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 9edd584a765d2a226d81ae3095fa4916, NAME => 'TestAcidGuarantees,,1734371848597.9edd584a765d2a226d81ae3095fa4916.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 9edd584a765d2a226d81ae3095fa4916, NAME => 'TestAcidGuarantees,,1734371848597.9edd584a765d2a226d81ae3095fa4916.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-16T17:57:49,868 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39733 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9edd584a765d2a226d81ae3095fa4916, server=3609ad07831c,39733,1734371789085 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-16T17:57:49,869 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39733 {}] ipc.CallRunner(138): callId: 184 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45486 deadline: 1734371929867, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9edd584a765d2a226d81ae3095fa4916, server=3609ad07831c,39733,1734371789085 2024-12-16T17:57:49,869 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39733 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9edd584a765d2a226d81ae3095fa4916, server=3609ad07831c,39733,1734371789085 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-16T17:57:49,869 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39733 {}] ipc.CallRunner(138): callId: 172 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45484 deadline: 1734371929868, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9edd584a765d2a226d81ae3095fa4916, server=3609ad07831c,39733,1734371789085 2024-12-16T17:57:49,910 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38367 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=81 2024-12-16T17:57:49,913 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 3609ad07831c,39733,1734371789085 2024-12-16T17:57:49,913 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=39733 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=82 2024-12-16T17:57:49,913 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-2 {event_type=RS_FLUSH_REGIONS, pid=82}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1734371848597.9edd584a765d2a226d81ae3095fa4916. 2024-12-16T17:57:49,913 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-2 {event_type=RS_FLUSH_REGIONS, pid=82}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1734371848597.9edd584a765d2a226d81ae3095fa4916. as already flushing 2024-12-16T17:57:49,913 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-2 {event_type=RS_FLUSH_REGIONS, pid=82}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1734371848597.9edd584a765d2a226d81ae3095fa4916. 2024-12-16T17:57:49,914 ERROR [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-2 {event_type=RS_FLUSH_REGIONS, pid=82}] handler.RSProcedureHandler(58): pid=82 java.io.IOException: Unable to complete flush {ENCODED => 9edd584a765d2a226d81ae3095fa4916, NAME => 'TestAcidGuarantees,,1734371848597.9edd584a765d2a226d81ae3095fa4916.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
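The FLUSH operations in this stretch (HMaster "flush TestAcidGuarantees", FlushTableProcedure pid=79/81, HBaseAdmin$TableFuture reporting "Operation: FLUSH ... completed") correspond to a client-side Admin.flush call; the "Unable to complete flush ... as already flushing" errors mean the region was already mid-flush when the remote FlushRegionCallable (pid=82) arrived, so the master re-dispatches it. A minimal sketch of the triggering call, assuming a reachable cluster on the default configuration:

```java
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

public class FlushTable {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    try (Connection conn = ConnectionFactory.createConnection(conf);
         Admin admin = conn.getAdmin()) {
      // Asks the master to flush the table and waits for the procedure to finish,
      // which is what produces the "Operation: FLUSH ... completed" line above.
      admin.flush(TableName.valueOf("TestAcidGuarantees"));
    }
  }
}
```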
2024-12-16T17:57:49,914 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-2 {event_type=RS_FLUSH_REGIONS, pid=82}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=82 java.io.IOException: Unable to complete flush {ENCODED => 9edd584a765d2a226d81ae3095fa4916, NAME => 'TestAcidGuarantees,,1734371848597.9edd584a765d2a226d81ae3095fa4916.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-16T17:57:49,914 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38367 {}] master.HMaster(4114): Remote procedure failed, pid=82 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 9edd584a765d2a226d81ae3095fa4916, NAME => 'TestAcidGuarantees,,1734371848597.9edd584a765d2a226d81ae3095fa4916.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 9edd584a765d2a226d81ae3095fa4916, NAME => 'TestAcidGuarantees,,1734371848597.9edd584a765d2a226d81ae3095fa4916.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-16T17:57:49,930 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=17.89 KB at sequenceid=442 (bloomFilter=true), to=hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/9edd584a765d2a226d81ae3095fa4916/.tmp/A/c0c9239186c6495bad41b07dfb8f5342 2024-12-16T17:57:49,942 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/9edd584a765d2a226d81ae3095fa4916/.tmp/B/a461de328958426d8915274c668ce91c is 50, key is test_row_0/B:col10/1734371869509/Put/seqid=0 2024-12-16T17:57:49,949 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41817 is added to blk_1073742169_1345 (size=12301) 2024-12-16T17:57:50,065 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 3609ad07831c,39733,1734371789085 2024-12-16T17:57:50,066 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=39733 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=82 2024-12-16T17:57:50,066 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-0 {event_type=RS_FLUSH_REGIONS, pid=82}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1734371848597.9edd584a765d2a226d81ae3095fa4916. 2024-12-16T17:57:50,066 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-0 {event_type=RS_FLUSH_REGIONS, pid=82}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1734371848597.9edd584a765d2a226d81ae3095fa4916. as already flushing 2024-12-16T17:57:50,066 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-0 {event_type=RS_FLUSH_REGIONS, pid=82}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1734371848597.9edd584a765d2a226d81ae3095fa4916. 2024-12-16T17:57:50,066 ERROR [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-0 {event_type=RS_FLUSH_REGIONS, pid=82}] handler.RSProcedureHandler(58): pid=82 java.io.IOException: Unable to complete flush {ENCODED => 9edd584a765d2a226d81ae3095fa4916, NAME => 'TestAcidGuarantees,,1734371848597.9edd584a765d2a226d81ae3095fa4916.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-16T17:57:50,066 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-0 {event_type=RS_FLUSH_REGIONS, pid=82}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=82 java.io.IOException: Unable to complete flush {ENCODED => 9edd584a765d2a226d81ae3095fa4916, NAME => 'TestAcidGuarantees,,1734371848597.9edd584a765d2a226d81ae3095fa4916.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-16T17:57:50,067 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38367 {}] master.HMaster(4114): Remote procedure failed, pid=82 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 9edd584a765d2a226d81ae3095fa4916, NAME => 'TestAcidGuarantees,,1734371848597.9edd584a765d2a226d81ae3095fa4916.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 9edd584a765d2a226d81ae3095fa4916, NAME => 'TestAcidGuarantees,,1734371848597.9edd584a765d2a226d81ae3095fa4916.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-16T17:57:50,171 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39733 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9edd584a765d2a226d81ae3095fa4916, server=3609ad07831c,39733,1734371789085 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-16T17:57:50,171 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39733 {}] ipc.CallRunner(138): callId: 174 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45484 deadline: 1734371930170, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9edd584a765d2a226d81ae3095fa4916, server=3609ad07831c,39733,1734371789085 2024-12-16T17:57:50,172 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39733 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9edd584a765d2a226d81ae3095fa4916, server=3609ad07831c,39733,1734371789085 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-16T17:57:50,172 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39733 {}] ipc.CallRunner(138): callId: 186 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45486 deadline: 1734371930170, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9edd584a765d2a226d81ae3095fa4916, server=3609ad07831c,39733,1734371789085 2024-12-16T17:57:50,211 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38367 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=81 2024-12-16T17:57:50,218 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 3609ad07831c,39733,1734371789085 2024-12-16T17:57:50,219 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=39733 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=82 2024-12-16T17:57:50,219 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-1 {event_type=RS_FLUSH_REGIONS, pid=82}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1734371848597.9edd584a765d2a226d81ae3095fa4916. 2024-12-16T17:57:50,219 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-1 {event_type=RS_FLUSH_REGIONS, pid=82}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1734371848597.9edd584a765d2a226d81ae3095fa4916. as already flushing 2024-12-16T17:57:50,219 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-1 {event_type=RS_FLUSH_REGIONS, pid=82}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1734371848597.9edd584a765d2a226d81ae3095fa4916. 2024-12-16T17:57:50,219 ERROR [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-1 {event_type=RS_FLUSH_REGIONS, pid=82}] handler.RSProcedureHandler(58): pid=82 java.io.IOException: Unable to complete flush {ENCODED => 9edd584a765d2a226d81ae3095fa4916, NAME => 'TestAcidGuarantees,,1734371848597.9edd584a765d2a226d81ae3095fa4916.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-16T17:57:50,219 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-1 {event_type=RS_FLUSH_REGIONS, pid=82}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=82 java.io.IOException: Unable to complete flush {ENCODED => 9edd584a765d2a226d81ae3095fa4916, NAME => 'TestAcidGuarantees,,1734371848597.9edd584a765d2a226d81ae3095fa4916.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-16T17:57:50,220 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38367 {}] master.HMaster(4114): Remote procedure failed, pid=82 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 9edd584a765d2a226d81ae3095fa4916, NAME => 'TestAcidGuarantees,,1734371848597.9edd584a765d2a226d81ae3095fa4916.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 9edd584a765d2a226d81ae3095fa4916, NAME => 'TestAcidGuarantees,,1734371848597.9edd584a765d2a226d81ae3095fa4916.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-16T17:57:50,350 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=17.89 KB at sequenceid=442 (bloomFilter=true), to=hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/9edd584a765d2a226d81ae3095fa4916/.tmp/B/a461de328958426d8915274c668ce91c 2024-12-16T17:57:50,359 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/9edd584a765d2a226d81ae3095fa4916/.tmp/C/a1cf6a349c56478aa977a54fd6a979e0 is 50, key is test_row_0/C:col10/1734371869509/Put/seqid=0 2024-12-16T17:57:50,371 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 3609ad07831c,39733,1734371789085 2024-12-16T17:57:50,371 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=39733 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=82 2024-12-16T17:57:50,371 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-2 {event_type=RS_FLUSH_REGIONS, pid=82}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1734371848597.9edd584a765d2a226d81ae3095fa4916. 2024-12-16T17:57:50,372 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-2 {event_type=RS_FLUSH_REGIONS, pid=82}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1734371848597.9edd584a765d2a226d81ae3095fa4916. as already flushing 2024-12-16T17:57:50,372 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-2 {event_type=RS_FLUSH_REGIONS, pid=82}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1734371848597.9edd584a765d2a226d81ae3095fa4916. 2024-12-16T17:57:50,372 ERROR [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-2 {event_type=RS_FLUSH_REGIONS, pid=82}] handler.RSProcedureHandler(58): pid=82 java.io.IOException: Unable to complete flush {ENCODED => 9edd584a765d2a226d81ae3095fa4916, NAME => 'TestAcidGuarantees,,1734371848597.9edd584a765d2a226d81ae3095fa4916.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-16T17:57:50,372 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-2 {event_type=RS_FLUSH_REGIONS, pid=82}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=82 java.io.IOException: Unable to complete flush {ENCODED => 9edd584a765d2a226d81ae3095fa4916, NAME => 'TestAcidGuarantees,,1734371848597.9edd584a765d2a226d81ae3095fa4916.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-16T17:57:50,372 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38367 {}] master.HMaster(4114): Remote procedure failed, pid=82 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 9edd584a765d2a226d81ae3095fa4916, NAME => 'TestAcidGuarantees,,1734371848597.9edd584a765d2a226d81ae3095fa4916.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 9edd584a765d2a226d81ae3095fa4916, NAME => 'TestAcidGuarantees,,1734371848597.9edd584a765d2a226d81ae3095fa4916.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-16T17:57:50,382 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41817 is added to blk_1073742170_1346 (size=12301) 2024-12-16T17:57:50,523 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 3609ad07831c,39733,1734371789085 2024-12-16T17:57:50,524 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=39733 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=82 2024-12-16T17:57:50,524 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-0 {event_type=RS_FLUSH_REGIONS, pid=82}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1734371848597.9edd584a765d2a226d81ae3095fa4916. 2024-12-16T17:57:50,524 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-0 {event_type=RS_FLUSH_REGIONS, pid=82}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1734371848597.9edd584a765d2a226d81ae3095fa4916. as already flushing 2024-12-16T17:57:50,524 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-0 {event_type=RS_FLUSH_REGIONS, pid=82}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1734371848597.9edd584a765d2a226d81ae3095fa4916. 2024-12-16T17:57:50,524 ERROR [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-0 {event_type=RS_FLUSH_REGIONS, pid=82}] handler.RSProcedureHandler(58): pid=82 java.io.IOException: Unable to complete flush {ENCODED => 9edd584a765d2a226d81ae3095fa4916, NAME => 'TestAcidGuarantees,,1734371848597.9edd584a765d2a226d81ae3095fa4916.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-16T17:57:50,524 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-0 {event_type=RS_FLUSH_REGIONS, pid=82}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=82 java.io.IOException: Unable to complete flush {ENCODED => 9edd584a765d2a226d81ae3095fa4916, NAME => 'TestAcidGuarantees,,1734371848597.9edd584a765d2a226d81ae3095fa4916.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-16T17:57:50,525 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38367 {}] master.HMaster(4114): Remote procedure failed, pid=82 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 9edd584a765d2a226d81ae3095fa4916, NAME => 'TestAcidGuarantees,,1734371848597.9edd584a765d2a226d81ae3095fa4916.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 9edd584a765d2a226d81ae3095fa4916, NAME => 'TestAcidGuarantees,,1734371848597.9edd584a765d2a226d81ae3095fa4916.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-16T17:57:50,675 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39733 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9edd584a765d2a226d81ae3095fa4916, server=3609ad07831c,39733,1734371789085 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-16T17:57:50,676 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39733 {}] ipc.CallRunner(138): callId: 176 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45484 deadline: 1734371930674, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9edd584a765d2a226d81ae3095fa4916, server=3609ad07831c,39733,1734371789085 2024-12-16T17:57:50,676 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 3609ad07831c,39733,1734371789085 2024-12-16T17:57:50,676 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=39733 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=82 2024-12-16T17:57:50,677 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-1 {event_type=RS_FLUSH_REGIONS, pid=82}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1734371848597.9edd584a765d2a226d81ae3095fa4916. 2024-12-16T17:57:50,677 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-1 {event_type=RS_FLUSH_REGIONS, pid=82}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1734371848597.9edd584a765d2a226d81ae3095fa4916. as already flushing 2024-12-16T17:57:50,677 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-1 {event_type=RS_FLUSH_REGIONS, pid=82}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1734371848597.9edd584a765d2a226d81ae3095fa4916. 2024-12-16T17:57:50,677 ERROR [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-1 {event_type=RS_FLUSH_REGIONS, pid=82}] handler.RSProcedureHandler(58): pid=82 java.io.IOException: Unable to complete flush {ENCODED => 9edd584a765d2a226d81ae3095fa4916, NAME => 'TestAcidGuarantees,,1734371848597.9edd584a765d2a226d81ae3095fa4916.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-16T17:57:50,677 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-1 {event_type=RS_FLUSH_REGIONS, pid=82}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=82 java.io.IOException: Unable to complete flush {ENCODED => 9edd584a765d2a226d81ae3095fa4916, NAME => 'TestAcidGuarantees,,1734371848597.9edd584a765d2a226d81ae3095fa4916.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-16T17:57:50,678 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39733 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9edd584a765d2a226d81ae3095fa4916, server=3609ad07831c,39733,1734371789085 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-16T17:57:50,678 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39733 {}] ipc.CallRunner(138): callId: 188 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45486 deadline: 1734371930676, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9edd584a765d2a226d81ae3095fa4916, server=3609ad07831c,39733,1734371789085 2024-12-16T17:57:50,680 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38367 {}] master.HMaster(4114): Remote procedure failed, pid=82 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 9edd584a765d2a226d81ae3095fa4916, NAME => 'TestAcidGuarantees,,1734371848597.9edd584a765d2a226d81ae3095fa4916.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 9edd584a765d2a226d81ae3095fa4916, NAME => 'TestAcidGuarantees,,1734371848597.9edd584a765d2a226d81ae3095fa4916.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-16T17:57:50,711 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38367 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=81 2024-12-16T17:57:50,783 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=17.89 KB at sequenceid=442 (bloomFilter=true), to=hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/9edd584a765d2a226d81ae3095fa4916/.tmp/C/a1cf6a349c56478aa977a54fd6a979e0 2024-12-16T17:57:50,787 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/9edd584a765d2a226d81ae3095fa4916/.tmp/A/c0c9239186c6495bad41b07dfb8f5342 as hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/9edd584a765d2a226d81ae3095fa4916/A/c0c9239186c6495bad41b07dfb8f5342 2024-12-16T17:57:50,790 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/9edd584a765d2a226d81ae3095fa4916/A/c0c9239186c6495bad41b07dfb8f5342, entries=150, sequenceid=442, filesize=12.0 K 2024-12-16T17:57:50,801 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/9edd584a765d2a226d81ae3095fa4916/.tmp/B/a461de328958426d8915274c668ce91c as hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/9edd584a765d2a226d81ae3095fa4916/B/a461de328958426d8915274c668ce91c 2024-12-16T17:57:50,809 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/9edd584a765d2a226d81ae3095fa4916/B/a461de328958426d8915274c668ce91c, entries=150, sequenceid=442, filesize=12.0 K 2024-12-16T17:57:50,810 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/9edd584a765d2a226d81ae3095fa4916/.tmp/C/a1cf6a349c56478aa977a54fd6a979e0 as hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/9edd584a765d2a226d81ae3095fa4916/C/a1cf6a349c56478aa977a54fd6a979e0 2024-12-16T17:57:50,826 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/9edd584a765d2a226d81ae3095fa4916/C/a1cf6a349c56478aa977a54fd6a979e0, entries=150, sequenceid=442, filesize=12.0 K 2024-12-16T17:57:50,828 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~53.67 KB/54960, heapSize ~141.33 KB/144720, currentSize=154.31 KB/158010 for 9edd584a765d2a226d81ae3095fa4916 in 1316ms, sequenceid=442, compaction requested=true 2024-12-16T17:57:50,828 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 9edd584a765d2a226d81ae3095fa4916: 2024-12-16T17:57:50,828 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 9edd584a765d2a226d81ae3095fa4916:A, priority=-2147483648, current under compaction store 
size is 1 2024-12-16T17:57:50,828 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-16T17:57:50,828 DEBUG [RS:0;3609ad07831c:39733-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-16T17:57:50,828 DEBUG [RS:0;3609ad07831c:39733-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-16T17:57:50,828 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 9edd584a765d2a226d81ae3095fa4916:B, priority=-2147483648, current under compaction store size is 2 2024-12-16T17:57:50,828 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-16T17:57:50,828 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 9edd584a765d2a226d81ae3095fa4916:C, priority=-2147483648, current under compaction store size is 3 2024-12-16T17:57:50,828 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-16T17:57:50,830 DEBUG [RS:0;3609ad07831c:39733-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 37857 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-16T17:57:50,830 DEBUG [RS:0;3609ad07831c:39733-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 37857 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-16T17:57:50,830 DEBUG [RS:0;3609ad07831c:39733-longCompactions-0 {}] regionserver.HStore(1540): 9edd584a765d2a226d81ae3095fa4916/B is initiating minor compaction (all files) 2024-12-16T17:57:50,830 DEBUG [RS:0;3609ad07831c:39733-shortCompactions-0 {}] regionserver.HStore(1540): 9edd584a765d2a226d81ae3095fa4916/A is initiating minor compaction (all files) 2024-12-16T17:57:50,830 INFO [RS:0;3609ad07831c:39733-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 9edd584a765d2a226d81ae3095fa4916/B in TestAcidGuarantees,,1734371848597.9edd584a765d2a226d81ae3095fa4916. 2024-12-16T17:57:50,830 INFO [RS:0;3609ad07831c:39733-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 9edd584a765d2a226d81ae3095fa4916/A in TestAcidGuarantees,,1734371848597.9edd584a765d2a226d81ae3095fa4916. 
2024-12-16T17:57:50,831 INFO [RS:0;3609ad07831c:39733-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/9edd584a765d2a226d81ae3095fa4916/A/eb24edd01c624651b211233fd9c49596, hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/9edd584a765d2a226d81ae3095fa4916/A/736b467ac0754b0b93586a33185cea59, hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/9edd584a765d2a226d81ae3095fa4916/A/c0c9239186c6495bad41b07dfb8f5342] into tmpdir=hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/9edd584a765d2a226d81ae3095fa4916/.tmp, totalSize=37.0 K 2024-12-16T17:57:50,831 INFO [RS:0;3609ad07831c:39733-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/9edd584a765d2a226d81ae3095fa4916/B/8c86dbe3469645c09f220c097d286dc1, hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/9edd584a765d2a226d81ae3095fa4916/B/5453782c29284a68b09cd7a38660de44, hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/9edd584a765d2a226d81ae3095fa4916/B/a461de328958426d8915274c668ce91c] into tmpdir=hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/9edd584a765d2a226d81ae3095fa4916/.tmp, totalSize=37.0 K 2024-12-16T17:57:50,831 DEBUG [RS:0;3609ad07831c:39733-longCompactions-0 {}] compactions.Compactor(224): Compacting 8c86dbe3469645c09f220c097d286dc1, keycount=150, bloomtype=ROW, size=12.9 K, encoding=NONE, compression=NONE, seqNum=403, earliestPutTs=1734371867410 2024-12-16T17:57:50,831 DEBUG [RS:0;3609ad07831c:39733-shortCompactions-0 {}] compactions.Compactor(224): Compacting eb24edd01c624651b211233fd9c49596, keycount=150, bloomtype=ROW, size=12.9 K, encoding=NONE, compression=NONE, seqNum=403, earliestPutTs=1734371867410 2024-12-16T17:57:50,831 DEBUG [RS:0;3609ad07831c:39733-shortCompactions-0 {}] compactions.Compactor(224): Compacting 736b467ac0754b0b93586a33185cea59, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=428, earliestPutTs=1734371867471 2024-12-16T17:57:50,831 DEBUG [RS:0;3609ad07831c:39733-longCompactions-0 {}] compactions.Compactor(224): Compacting 5453782c29284a68b09cd7a38660de44, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=428, earliestPutTs=1734371867471 2024-12-16T17:57:50,831 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 3609ad07831c,39733,1734371789085 2024-12-16T17:57:50,832 DEBUG [RS:0;3609ad07831c:39733-shortCompactions-0 {}] compactions.Compactor(224): Compacting c0c9239186c6495bad41b07dfb8f5342, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=442, earliestPutTs=1734371869504 2024-12-16T17:57:50,832 DEBUG [RS:0;3609ad07831c:39733-longCompactions-0 {}] compactions.Compactor(224): Compacting a461de328958426d8915274c668ce91c, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=442, earliestPutTs=1734371869504 2024-12-16T17:57:50,832 DEBUG 
[RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=39733 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=82 2024-12-16T17:57:50,832 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-2 {event_type=RS_FLUSH_REGIONS, pid=82}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1734371848597.9edd584a765d2a226d81ae3095fa4916. 2024-12-16T17:57:50,832 INFO [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-2 {event_type=RS_FLUSH_REGIONS, pid=82}] regionserver.HRegion(2837): Flushing 9edd584a765d2a226d81ae3095fa4916 3/3 column families, dataSize=154.31 KB heapSize=405.05 KB 2024-12-16T17:57:50,832 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-2 {event_type=RS_FLUSH_REGIONS, pid=82}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 9edd584a765d2a226d81ae3095fa4916, store=A 2024-12-16T17:57:50,832 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-2 {event_type=RS_FLUSH_REGIONS, pid=82}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-16T17:57:50,833 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-2 {event_type=RS_FLUSH_REGIONS, pid=82}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 9edd584a765d2a226d81ae3095fa4916, store=B 2024-12-16T17:57:50,833 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-2 {event_type=RS_FLUSH_REGIONS, pid=82}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-16T17:57:50,833 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-2 {event_type=RS_FLUSH_REGIONS, pid=82}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 9edd584a765d2a226d81ae3095fa4916, store=C 2024-12-16T17:57:50,833 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-2 {event_type=RS_FLUSH_REGIONS, pid=82}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-16T17:57:50,849 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-2 {event_type=RS_FLUSH_REGIONS, pid=82}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/9edd584a765d2a226d81ae3095fa4916/.tmp/A/b4ca799a0a364707915c8e7966351c54 is 50, key is test_row_0/A:col10/1734371869562/Put/seqid=0 2024-12-16T17:57:50,854 INFO [RS:0;3609ad07831c:39733-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 9edd584a765d2a226d81ae3095fa4916#B#compaction#301 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-12-16T17:57:50,854 DEBUG [RS:0;3609ad07831c:39733-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/9edd584a765d2a226d81ae3095fa4916/.tmp/B/75c20fd7994d4b81b097a07a33c34377 is 50, key is test_row_0/B:col10/1734371869509/Put/seqid=0 2024-12-16T17:57:50,862 INFO [RS:0;3609ad07831c:39733-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 9edd584a765d2a226d81ae3095fa4916#A#compaction#302 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-12-16T17:57:50,862 DEBUG [RS:0;3609ad07831c:39733-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/9edd584a765d2a226d81ae3095fa4916/.tmp/A/883761bb055b4a8c9a70bd6ebcee75df is 50, key is test_row_0/A:col10/1734371869509/Put/seqid=0 2024-12-16T17:57:50,874 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41817 is added to blk_1073742171_1347 (size=13357) 2024-12-16T17:57:50,877 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41817 is added to blk_1073742172_1348 (size=13357) 2024-12-16T17:57:50,878 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41817 is added to blk_1073742173_1349 (size=12301) 2024-12-16T17:57:50,882 DEBUG [RS:0;3609ad07831c:39733-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/9edd584a765d2a226d81ae3095fa4916/.tmp/A/883761bb055b4a8c9a70bd6ebcee75df as hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/9edd584a765d2a226d81ae3095fa4916/A/883761bb055b4a8c9a70bd6ebcee75df 2024-12-16T17:57:50,887 INFO [RS:0;3609ad07831c:39733-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 9edd584a765d2a226d81ae3095fa4916/A of 9edd584a765d2a226d81ae3095fa4916 into 883761bb055b4a8c9a70bd6ebcee75df(size=13.0 K), total size for store is 13.0 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-12-16T17:57:50,887 DEBUG [RS:0;3609ad07831c:39733-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 9edd584a765d2a226d81ae3095fa4916: 2024-12-16T17:57:50,887 INFO [RS:0;3609ad07831c:39733-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1734371848597.9edd584a765d2a226d81ae3095fa4916., storeName=9edd584a765d2a226d81ae3095fa4916/A, priority=13, startTime=1734371870828; duration=0sec 2024-12-16T17:57:50,887 DEBUG [RS:0;3609ad07831c:39733-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-16T17:57:50,887 DEBUG [RS:0;3609ad07831c:39733-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 9edd584a765d2a226d81ae3095fa4916:A 2024-12-16T17:57:50,887 DEBUG [RS:0;3609ad07831c:39733-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-16T17:57:50,888 DEBUG [RS:0;3609ad07831c:39733-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 37857 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-16T17:57:50,888 DEBUG [RS:0;3609ad07831c:39733-shortCompactions-0 {}] regionserver.HStore(1540): 9edd584a765d2a226d81ae3095fa4916/C is initiating minor compaction (all files) 2024-12-16T17:57:50,888 INFO [RS:0;3609ad07831c:39733-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 9edd584a765d2a226d81ae3095fa4916/C in TestAcidGuarantees,,1734371848597.9edd584a765d2a226d81ae3095fa4916. 2024-12-16T17:57:50,888 INFO [RS:0;3609ad07831c:39733-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/9edd584a765d2a226d81ae3095fa4916/C/1f0688c42e8a4994aede07484759022b, hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/9edd584a765d2a226d81ae3095fa4916/C/e045a9b92f6840c78251007a23e9ae48, hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/9edd584a765d2a226d81ae3095fa4916/C/a1cf6a349c56478aa977a54fd6a979e0] into tmpdir=hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/9edd584a765d2a226d81ae3095fa4916/.tmp, totalSize=37.0 K 2024-12-16T17:57:50,889 DEBUG [RS:0;3609ad07831c:39733-shortCompactions-0 {}] compactions.Compactor(224): Compacting 1f0688c42e8a4994aede07484759022b, keycount=150, bloomtype=ROW, size=12.9 K, encoding=NONE, compression=NONE, seqNum=403, earliestPutTs=1734371867410 2024-12-16T17:57:50,889 DEBUG [RS:0;3609ad07831c:39733-shortCompactions-0 {}] compactions.Compactor(224): Compacting e045a9b92f6840c78251007a23e9ae48, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=428, earliestPutTs=1734371867471 2024-12-16T17:57:50,889 DEBUG [RS:0;3609ad07831c:39733-shortCompactions-0 {}] compactions.Compactor(224): Compacting a1cf6a349c56478aa977a54fd6a979e0, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=442, earliestPutTs=1734371869504 2024-12-16T17:57:50,894 INFO [RS:0;3609ad07831c:39733-shortCompactions-0 {}] 
throttle.PressureAwareThroughputController(145): 9edd584a765d2a226d81ae3095fa4916#C#compaction#303 average throughput is unlimited, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-12-16T17:57:50,895 DEBUG [RS:0;3609ad07831c:39733-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/9edd584a765d2a226d81ae3095fa4916/.tmp/C/78fd7884c4b249c2a7e1547cc0d5e3b6 is 50, key is test_row_0/C:col10/1734371869509/Put/seqid=0 2024-12-16T17:57:50,898 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41817 is added to blk_1073742174_1350 (size=13357) 2024-12-16T17:57:51,097 DEBUG [Thread-1172 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x1359ecb3 to 127.0.0.1:49190 2024-12-16T17:57:51,097 DEBUG [Thread-1176 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x49d7f41f to 127.0.0.1:49190 2024-12-16T17:57:51,097 DEBUG [Thread-1180 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x62a8cb31 to 127.0.0.1:49190 2024-12-16T17:57:51,097 DEBUG [Thread-1176 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-16T17:57:51,097 DEBUG [Thread-1180 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-16T17:57:51,097 DEBUG [Thread-1172 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-16T17:57:51,098 DEBUG [Thread-1178 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x71eeb576 to 127.0.0.1:49190 2024-12-16T17:57:51,098 DEBUG [Thread-1178 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-16T17:57:51,098 DEBUG [Thread-1174 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x3838b5fe to 127.0.0.1:49190 2024-12-16T17:57:51,098 DEBUG [Thread-1174 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-16T17:57:51,279 INFO [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-2 {event_type=RS_FLUSH_REGIONS, pid=82}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=51.44 KB at sequenceid=468 (bloomFilter=true), to=hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/9edd584a765d2a226d81ae3095fa4916/.tmp/A/b4ca799a0a364707915c8e7966351c54 2024-12-16T17:57:51,281 DEBUG [RS:0;3609ad07831c:39733-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/9edd584a765d2a226d81ae3095fa4916/.tmp/B/75c20fd7994d4b81b097a07a33c34377 as hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/9edd584a765d2a226d81ae3095fa4916/B/75c20fd7994d4b81b097a07a33c34377 2024-12-16T17:57:51,284 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-2 {event_type=RS_FLUSH_REGIONS, pid=82}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/9edd584a765d2a226d81ae3095fa4916/.tmp/B/74196722915b40318e6732f85510c3ae is 50, key is test_row_0/B:col10/1734371869562/Put/seqid=0 2024-12-16T17:57:51,284 INFO [RS:0;3609ad07831c:39733-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 9edd584a765d2a226d81ae3095fa4916/B of 9edd584a765d2a226d81ae3095fa4916 into 
75c20fd7994d4b81b097a07a33c34377(size=13.0 K), total size for store is 13.0 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-12-16T17:57:51,284 DEBUG [RS:0;3609ad07831c:39733-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 9edd584a765d2a226d81ae3095fa4916: 2024-12-16T17:57:51,284 INFO [RS:0;3609ad07831c:39733-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1734371848597.9edd584a765d2a226d81ae3095fa4916., storeName=9edd584a765d2a226d81ae3095fa4916/B, priority=13, startTime=1734371870828; duration=0sec 2024-12-16T17:57:51,285 DEBUG [RS:0;3609ad07831c:39733-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-16T17:57:51,285 DEBUG [RS:0;3609ad07831c:39733-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 9edd584a765d2a226d81ae3095fa4916:B 2024-12-16T17:57:51,286 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41817 is added to blk_1073742175_1351 (size=12301) 2024-12-16T17:57:51,302 DEBUG [RS:0;3609ad07831c:39733-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/9edd584a765d2a226d81ae3095fa4916/.tmp/C/78fd7884c4b249c2a7e1547cc0d5e3b6 as hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/9edd584a765d2a226d81ae3095fa4916/C/78fd7884c4b249c2a7e1547cc0d5e3b6 2024-12-16T17:57:51,306 INFO [RS:0;3609ad07831c:39733-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 9edd584a765d2a226d81ae3095fa4916/C of 9edd584a765d2a226d81ae3095fa4916 into 78fd7884c4b249c2a7e1547cc0d5e3b6(size=13.0 K), total size for store is 13.0 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-12-16T17:57:51,306 DEBUG [RS:0;3609ad07831c:39733-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 9edd584a765d2a226d81ae3095fa4916: 2024-12-16T17:57:51,306 INFO [RS:0;3609ad07831c:39733-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1734371848597.9edd584a765d2a226d81ae3095fa4916., storeName=9edd584a765d2a226d81ae3095fa4916/C, priority=13, startTime=1734371870828; duration=0sec 2024-12-16T17:57:51,306 DEBUG [RS:0;3609ad07831c:39733-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-16T17:57:51,306 DEBUG [RS:0;3609ad07831c:39733-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 9edd584a765d2a226d81ae3095fa4916:C 2024-12-16T17:57:51,608 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39733 {}] regionserver.HRegion(8581): Flush requested on 9edd584a765d2a226d81ae3095fa4916 2024-12-16T17:57:51,608 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1734371848597.9edd584a765d2a226d81ae3095fa4916. 
as already flushing 2024-12-16T17:57:51,609 DEBUG [Thread-1163 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x55db8dd7 to 127.0.0.1:49190 2024-12-16T17:57:51,609 DEBUG [Thread-1163 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-16T17:57:51,684 DEBUG [Thread-1161 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x65cb9e20 to 127.0.0.1:49190 2024-12-16T17:57:51,684 DEBUG [Thread-1161 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-16T17:57:51,685 DEBUG [Thread-1169 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x651a82a0 to 127.0.0.1:49190 2024-12-16T17:57:51,685 DEBUG [Thread-1169 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-16T17:57:51,688 INFO [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-2 {event_type=RS_FLUSH_REGIONS, pid=82}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=51.44 KB at sequenceid=468 (bloomFilter=true), to=hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/9edd584a765d2a226d81ae3095fa4916/.tmp/B/74196722915b40318e6732f85510c3ae 2024-12-16T17:57:51,697 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-2 {event_type=RS_FLUSH_REGIONS, pid=82}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/9edd584a765d2a226d81ae3095fa4916/.tmp/C/abd65d5b4c884366b611ceabc6447dad is 50, key is test_row_0/C:col10/1734371869562/Put/seqid=0 2024-12-16T17:57:51,701 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41817 is added to blk_1073742176_1352 (size=12301) 2024-12-16T17:57:51,712 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38367 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=81 2024-12-16T17:57:52,102 INFO [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-2 {event_type=RS_FLUSH_REGIONS, pid=82}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=51.44 KB at sequenceid=468 (bloomFilter=true), to=hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/9edd584a765d2a226d81ae3095fa4916/.tmp/C/abd65d5b4c884366b611ceabc6447dad 2024-12-16T17:57:52,112 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-2 {event_type=RS_FLUSH_REGIONS, pid=82}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/9edd584a765d2a226d81ae3095fa4916/.tmp/A/b4ca799a0a364707915c8e7966351c54 as hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/9edd584a765d2a226d81ae3095fa4916/A/b4ca799a0a364707915c8e7966351c54 2024-12-16T17:57:52,120 INFO [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-2 {event_type=RS_FLUSH_REGIONS, pid=82}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/9edd584a765d2a226d81ae3095fa4916/A/b4ca799a0a364707915c8e7966351c54, entries=150, sequenceid=468, filesize=12.0 K 2024-12-16T17:57:52,121 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-2 {event_type=RS_FLUSH_REGIONS, pid=82}] regionserver.HRegionFileSystem(442): Committing 
hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/9edd584a765d2a226d81ae3095fa4916/.tmp/B/74196722915b40318e6732f85510c3ae as hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/9edd584a765d2a226d81ae3095fa4916/B/74196722915b40318e6732f85510c3ae 2024-12-16T17:57:52,126 INFO [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-2 {event_type=RS_FLUSH_REGIONS, pid=82}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/9edd584a765d2a226d81ae3095fa4916/B/74196722915b40318e6732f85510c3ae, entries=150, sequenceid=468, filesize=12.0 K 2024-12-16T17:57:52,127 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-2 {event_type=RS_FLUSH_REGIONS, pid=82}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/9edd584a765d2a226d81ae3095fa4916/.tmp/C/abd65d5b4c884366b611ceabc6447dad as hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/9edd584a765d2a226d81ae3095fa4916/C/abd65d5b4c884366b611ceabc6447dad 2024-12-16T17:57:52,130 INFO [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-2 {event_type=RS_FLUSH_REGIONS, pid=82}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/9edd584a765d2a226d81ae3095fa4916/C/abd65d5b4c884366b611ceabc6447dad, entries=150, sequenceid=468, filesize=12.0 K 2024-12-16T17:57:52,131 INFO [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-2 {event_type=RS_FLUSH_REGIONS, pid=82}] regionserver.HRegion(3040): Finished flush of dataSize ~154.31 KB/158010, heapSize ~405 KB/414720, currentSize=20.13 KB/20610 for 9edd584a765d2a226d81ae3095fa4916 in 1299ms, sequenceid=468, compaction requested=false 2024-12-16T17:57:52,131 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-2 {event_type=RS_FLUSH_REGIONS, pid=82}] regionserver.HRegion(2538): Flush status journal for 9edd584a765d2a226d81ae3095fa4916: 2024-12-16T17:57:52,131 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-2 {event_type=RS_FLUSH_REGIONS, pid=82}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1734371848597.9edd584a765d2a226d81ae3095fa4916. 
2024-12-16T17:57:52,131 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-2 {event_type=RS_FLUSH_REGIONS, pid=82}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=82 2024-12-16T17:57:52,132 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38367 {}] master.HMaster(4106): Remote procedure done, pid=82 2024-12-16T17:57:52,133 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=82, resume processing ppid=81 2024-12-16T17:57:52,133 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=82, ppid=81, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 2.5220 sec 2024-12-16T17:57:52,134 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=81, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=81, table=TestAcidGuarantees in 2.5300 sec 2024-12-16T17:57:53,713 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38367 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=81 2024-12-16T17:57:53,713 INFO [Thread-1171 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 81 completed 2024-12-16T17:57:54,003 DEBUG [Thread-1167 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x567fe249 to 127.0.0.1:49190 2024-12-16T17:57:54,003 DEBUG [Thread-1167 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-16T17:57:54,083 DEBUG [Thread-1165 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x094b9cbe to 127.0.0.1:49190 2024-12-16T17:57:54,083 DEBUG [Thread-1165 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-16T17:57:54,083 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(392): Finished test. 
Writers: 2024-12-16T17:57:54,083 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(394): wrote 81 2024-12-16T17:57:54,083 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(394): wrote 127 2024-12-16T17:57:54,083 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(394): wrote 78 2024-12-16T17:57:54,083 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(394): wrote 21 2024-12-16T17:57:54,083 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(394): wrote 69 2024-12-16T17:57:54,083 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(396): Readers: 2024-12-16T17:57:54,083 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(398): read 7040 2024-12-16T17:57:54,083 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(398): read 7167 2024-12-16T17:57:54,083 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(398): read 6926 2024-12-16T17:57:54,083 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(398): read 7024 2024-12-16T17:57:54,083 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(398): read 7145 2024-12-16T17:57:54,083 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(400): Scanners: 2024-12-16T17:57:54,083 INFO [Time-limited test {}] client.ConnectionImplementation(2127): Closing master protocol: MasterService 2024-12-16T17:57:54,083 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x3ec51c1b to 127.0.0.1:49190 2024-12-16T17:57:54,083 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-16T17:57:54,084 INFO [Time-limited test {}] client.HBaseAdmin$18(967): Started disable of TestAcidGuarantees 2024-12-16T17:57:54,084 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38367 {}] master.HMaster$13(2755): Client=jenkins//172.17.0.2 disable TestAcidGuarantees 2024-12-16T17:57:54,085 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38367 {}] procedure2.ProcedureExecutor(1098): Stored pid=83, state=RUNNABLE:DISABLE_TABLE_PREPARE; DisableTableProcedure table=TestAcidGuarantees 2024-12-16T17:57:54,086 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38367 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=83 2024-12-16T17:57:54,086 DEBUG [PEWorker-2 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":1,"row":"TestAcidGuarantees","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1734371874086"}]},"ts":"1734371874086"} 2024-12-16T17:57:54,087 INFO [PEWorker-2 {}] hbase.MetaTableAccessor(1655): Updated tableName=TestAcidGuarantees, state=DISABLING in hbase:meta 2024-12-16T17:57:54,113 INFO [PEWorker-2 {}] procedure.DisableTableProcedure(284): Set TestAcidGuarantees to state=DISABLING 2024-12-16T17:57:54,114 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=84, ppid=83, state=RUNNABLE:CLOSE_TABLE_REGIONS_SCHEDULE; CloseTableRegionsProcedure table=TestAcidGuarantees}] 2024-12-16T17:57:54,115 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=85, ppid=84, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE; TransitRegionStateProcedure table=TestAcidGuarantees, region=9edd584a765d2a226d81ae3095fa4916, UNASSIGN}] 2024-12-16T17:57:54,115 INFO [PEWorker-5 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=85, ppid=84, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE; TransitRegionStateProcedure table=TestAcidGuarantees, region=9edd584a765d2a226d81ae3095fa4916, UNASSIGN 2024-12-16T17:57:54,116 INFO [PEWorker-5 {}] 
assignment.RegionStateStore(202): pid=85 updating hbase:meta row=9edd584a765d2a226d81ae3095fa4916, regionState=CLOSING, regionLocation=3609ad07831c,39733,1734371789085 2024-12-16T17:57:54,117 DEBUG [PEWorker-5 {}] assignment.TransitRegionStateProcedure(338): Close region: isSplit: false: evictOnSplit: true: evictOnClose: false 2024-12-16T17:57:54,117 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=86, ppid=85, state=RUNNABLE; CloseRegionProcedure 9edd584a765d2a226d81ae3095fa4916, server=3609ad07831c,39733,1734371789085}] 2024-12-16T17:57:54,187 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38367 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=83 2024-12-16T17:57:54,268 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 3609ad07831c,39733,1734371789085 2024-12-16T17:57:54,269 INFO [RS_CLOSE_REGION-regionserver/3609ad07831c:0-0 {event_type=M_RS_CLOSE_REGION, pid=86}] handler.UnassignRegionHandler(124): Close 9edd584a765d2a226d81ae3095fa4916 2024-12-16T17:57:54,269 DEBUG [RS_CLOSE_REGION-regionserver/3609ad07831c:0-0 {event_type=M_RS_CLOSE_REGION, pid=86}] handler.UnassignRegionHandler(138): Unassign region: split region: false: evictCache: false 2024-12-16T17:57:54,269 DEBUG [RS_CLOSE_REGION-regionserver/3609ad07831c:0-0 {event_type=M_RS_CLOSE_REGION, pid=86}] regionserver.HRegion(1681): Closing 9edd584a765d2a226d81ae3095fa4916, disabling compactions & flushes 2024-12-16T17:57:54,269 INFO [RS_CLOSE_REGION-regionserver/3609ad07831c:0-0 {event_type=M_RS_CLOSE_REGION, pid=86}] regionserver.HRegion(1703): Closing region TestAcidGuarantees,,1734371848597.9edd584a765d2a226d81ae3095fa4916. 2024-12-16T17:57:54,269 DEBUG [RS_CLOSE_REGION-regionserver/3609ad07831c:0-0 {event_type=M_RS_CLOSE_REGION, pid=86}] regionserver.HRegion(1724): Waiting without time limit for close lock on TestAcidGuarantees,,1734371848597.9edd584a765d2a226d81ae3095fa4916. 2024-12-16T17:57:54,269 DEBUG [RS_CLOSE_REGION-regionserver/3609ad07831c:0-0 {event_type=M_RS_CLOSE_REGION, pid=86}] regionserver.HRegion(1791): Acquired close lock on TestAcidGuarantees,,1734371848597.9edd584a765d2a226d81ae3095fa4916. after waiting 0 ms 2024-12-16T17:57:54,269 DEBUG [RS_CLOSE_REGION-regionserver/3609ad07831c:0-0 {event_type=M_RS_CLOSE_REGION, pid=86}] regionserver.HRegion(1801): Updates disabled for region TestAcidGuarantees,,1734371848597.9edd584a765d2a226d81ae3095fa4916. 
2024-12-16T17:57:54,269 INFO [RS_CLOSE_REGION-regionserver/3609ad07831c:0-0 {event_type=M_RS_CLOSE_REGION, pid=86}] regionserver.HRegion(2837): Flushing 9edd584a765d2a226d81ae3095fa4916 3/3 column families, dataSize=33.54 KB heapSize=88.64 KB 2024-12-16T17:57:54,270 DEBUG [RS_CLOSE_REGION-regionserver/3609ad07831c:0-0 {event_type=M_RS_CLOSE_REGION, pid=86}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 9edd584a765d2a226d81ae3095fa4916, store=A 2024-12-16T17:57:54,270 DEBUG [RS_CLOSE_REGION-regionserver/3609ad07831c:0-0 {event_type=M_RS_CLOSE_REGION, pid=86}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-16T17:57:54,270 DEBUG [RS_CLOSE_REGION-regionserver/3609ad07831c:0-0 {event_type=M_RS_CLOSE_REGION, pid=86}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 9edd584a765d2a226d81ae3095fa4916, store=B 2024-12-16T17:57:54,270 DEBUG [RS_CLOSE_REGION-regionserver/3609ad07831c:0-0 {event_type=M_RS_CLOSE_REGION, pid=86}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-16T17:57:54,270 DEBUG [RS_CLOSE_REGION-regionserver/3609ad07831c:0-0 {event_type=M_RS_CLOSE_REGION, pid=86}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 9edd584a765d2a226d81ae3095fa4916, store=C 2024-12-16T17:57:54,270 DEBUG [RS_CLOSE_REGION-regionserver/3609ad07831c:0-0 {event_type=M_RS_CLOSE_REGION, pid=86}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-16T17:57:54,274 DEBUG [RS_CLOSE_REGION-regionserver/3609ad07831c:0-0 {event_type=M_RS_CLOSE_REGION, pid=86}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/9edd584a765d2a226d81ae3095fa4916/.tmp/A/5c913f96bc0d415fb4b54242d131f8f0 is 50, key is test_row_0/A:col10/1734371871681/Put/seqid=0 2024-12-16T17:57:54,278 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41817 is added to blk_1073742177_1353 (size=12301) 2024-12-16T17:57:54,387 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38367 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=83 2024-12-16T17:57:54,679 INFO [RS_CLOSE_REGION-regionserver/3609ad07831c:0-0 {event_type=M_RS_CLOSE_REGION, pid=86}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=11.18 KB at sequenceid=479 (bloomFilter=true), to=hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/9edd584a765d2a226d81ae3095fa4916/.tmp/A/5c913f96bc0d415fb4b54242d131f8f0 2024-12-16T17:57:54,685 DEBUG [RS_CLOSE_REGION-regionserver/3609ad07831c:0-0 {event_type=M_RS_CLOSE_REGION, pid=86}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/9edd584a765d2a226d81ae3095fa4916/.tmp/B/7f2a6982a3724e3fa43002725e731627 is 50, key is test_row_0/B:col10/1734371871681/Put/seqid=0 2024-12-16T17:57:54,688 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38367 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=83 2024-12-16T17:57:54,689 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41817 is added to blk_1073742178_1354 (size=12301) 2024-12-16T17:57:55,089 INFO [RS_CLOSE_REGION-regionserver/3609ad07831c:0-0 
{event_type=M_RS_CLOSE_REGION, pid=86}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=11.18 KB at sequenceid=479 (bloomFilter=true), to=hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/9edd584a765d2a226d81ae3095fa4916/.tmp/B/7f2a6982a3724e3fa43002725e731627 2024-12-16T17:57:55,095 DEBUG [RS_CLOSE_REGION-regionserver/3609ad07831c:0-0 {event_type=M_RS_CLOSE_REGION, pid=86}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/9edd584a765d2a226d81ae3095fa4916/.tmp/C/dcee915ab09640afbd67eb3bfc137b9e is 50, key is test_row_0/C:col10/1734371871681/Put/seqid=0 2024-12-16T17:57:55,098 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41817 is added to blk_1073742179_1355 (size=12301) 2024-12-16T17:57:55,189 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38367 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=83 2024-12-16T17:57:55,499 INFO [RS_CLOSE_REGION-regionserver/3609ad07831c:0-0 {event_type=M_RS_CLOSE_REGION, pid=86}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=11.18 KB at sequenceid=479 (bloomFilter=true), to=hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/9edd584a765d2a226d81ae3095fa4916/.tmp/C/dcee915ab09640afbd67eb3bfc137b9e 2024-12-16T17:57:55,503 DEBUG [RS_CLOSE_REGION-regionserver/3609ad07831c:0-0 {event_type=M_RS_CLOSE_REGION, pid=86}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/9edd584a765d2a226d81ae3095fa4916/.tmp/A/5c913f96bc0d415fb4b54242d131f8f0 as hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/9edd584a765d2a226d81ae3095fa4916/A/5c913f96bc0d415fb4b54242d131f8f0 2024-12-16T17:57:55,508 INFO [RS_CLOSE_REGION-regionserver/3609ad07831c:0-0 {event_type=M_RS_CLOSE_REGION, pid=86}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/9edd584a765d2a226d81ae3095fa4916/A/5c913f96bc0d415fb4b54242d131f8f0, entries=150, sequenceid=479, filesize=12.0 K 2024-12-16T17:57:55,509 DEBUG [RS_CLOSE_REGION-regionserver/3609ad07831c:0-0 {event_type=M_RS_CLOSE_REGION, pid=86}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/9edd584a765d2a226d81ae3095fa4916/.tmp/B/7f2a6982a3724e3fa43002725e731627 as hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/9edd584a765d2a226d81ae3095fa4916/B/7f2a6982a3724e3fa43002725e731627 2024-12-16T17:57:55,512 INFO [RS_CLOSE_REGION-regionserver/3609ad07831c:0-0 {event_type=M_RS_CLOSE_REGION, pid=86}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/9edd584a765d2a226d81ae3095fa4916/B/7f2a6982a3724e3fa43002725e731627, entries=150, sequenceid=479, filesize=12.0 K 2024-12-16T17:57:55,513 DEBUG [RS_CLOSE_REGION-regionserver/3609ad07831c:0-0 {event_type=M_RS_CLOSE_REGION, pid=86}] regionserver.HRegionFileSystem(442): 
Committing hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/9edd584a765d2a226d81ae3095fa4916/.tmp/C/dcee915ab09640afbd67eb3bfc137b9e as hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/9edd584a765d2a226d81ae3095fa4916/C/dcee915ab09640afbd67eb3bfc137b9e 2024-12-16T17:57:55,516 INFO [RS_CLOSE_REGION-regionserver/3609ad07831c:0-0 {event_type=M_RS_CLOSE_REGION, pid=86}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/9edd584a765d2a226d81ae3095fa4916/C/dcee915ab09640afbd67eb3bfc137b9e, entries=150, sequenceid=479, filesize=12.0 K 2024-12-16T17:57:55,517 INFO [RS_CLOSE_REGION-regionserver/3609ad07831c:0-0 {event_type=M_RS_CLOSE_REGION, pid=86}] regionserver.HRegion(3040): Finished flush of dataSize ~33.54 KB/34350, heapSize ~88.59 KB/90720, currentSize=0 B/0 for 9edd584a765d2a226d81ae3095fa4916 in 1248ms, sequenceid=479, compaction requested=true 2024-12-16T17:57:55,517 DEBUG [StoreCloser-TestAcidGuarantees,,1734371848597.9edd584a765d2a226d81ae3095fa4916.-1 {}] regionserver.HStore(2316): Moving the files [hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/9edd584a765d2a226d81ae3095fa4916/A/d8c330f2b8b146708621caf329c5f1db, hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/9edd584a765d2a226d81ae3095fa4916/A/aebe984fc5ec4a5fbbd4060edacc4bdc, hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/9edd584a765d2a226d81ae3095fa4916/A/cb3769fb7a554a47ad0a6ee7bb9aa3dd, hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/9edd584a765d2a226d81ae3095fa4916/A/ff0e0852c9ef43a1b0a86fe9a7998e00, hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/9edd584a765d2a226d81ae3095fa4916/A/ededcf6667c544aba64455be628d26c4, hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/9edd584a765d2a226d81ae3095fa4916/A/090efc5c96494f04a3a3b0c03ada981a, hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/9edd584a765d2a226d81ae3095fa4916/A/86cdfcd5b50544368b721664d3695a24, hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/9edd584a765d2a226d81ae3095fa4916/A/0b4ad3bd36414bdf9dea359392699961, hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/9edd584a765d2a226d81ae3095fa4916/A/d9efa881ce15457a830a3cbb6f569400, hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/9edd584a765d2a226d81ae3095fa4916/A/0d26f1108c444d76b2ff5bb43c127458, hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/9edd584a765d2a226d81ae3095fa4916/A/5147d5cddff041fab1c52700c0422ef0, hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/9edd584a765d2a226d81ae3095fa4916/A/b2c80ed673994ed98011715ef421612c, 
hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/9edd584a765d2a226d81ae3095fa4916/A/6289fec02d764918a743798704c5d3e6, hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/9edd584a765d2a226d81ae3095fa4916/A/df12539b3ad0476c960a5df2afcc043e, hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/9edd584a765d2a226d81ae3095fa4916/A/edc8531fd19841ce88fbd9d9d84bffed, hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/9edd584a765d2a226d81ae3095fa4916/A/9cb89fd772694309b6e1711fbeca756f, hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/9edd584a765d2a226d81ae3095fa4916/A/45dc969387eb488b94a27699e9d0ec80, hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/9edd584a765d2a226d81ae3095fa4916/A/b11d7a7e1143462aa782022df054b4c8, hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/9edd584a765d2a226d81ae3095fa4916/A/f854a9e208b941ccafc3f0cb3cad46f4, hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/9edd584a765d2a226d81ae3095fa4916/A/163f14959f6448d7b0df2252e5a9d2ea, hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/9edd584a765d2a226d81ae3095fa4916/A/3b69673bc32244ac9bbc02d3e38837f9, hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/9edd584a765d2a226d81ae3095fa4916/A/e9748a47b8b54251beadee6f5240470f, hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/9edd584a765d2a226d81ae3095fa4916/A/7b27faf4cdd640a6a30ef945d439bfe1, hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/9edd584a765d2a226d81ae3095fa4916/A/9a2426b80f9446ddac577f0165208c42, hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/9edd584a765d2a226d81ae3095fa4916/A/d987114fae5148eba8c369b0d9e31847, hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/9edd584a765d2a226d81ae3095fa4916/A/d00f32e57edb4ad88375f6de7b405f7b, hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/9edd584a765d2a226d81ae3095fa4916/A/c30b1fbfbdd54b768c0879ce8f661472, hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/9edd584a765d2a226d81ae3095fa4916/A/eb24edd01c624651b211233fd9c49596, hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/9edd584a765d2a226d81ae3095fa4916/A/f609b536080347aa8c03be0b56810506, hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/9edd584a765d2a226d81ae3095fa4916/A/736b467ac0754b0b93586a33185cea59, hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/9edd584a765d2a226d81ae3095fa4916/A/c0c9239186c6495bad41b07dfb8f5342] to archive 2024-12-16T17:57:55,518 DEBUG 
[StoreCloser-TestAcidGuarantees,,1734371848597.9edd584a765d2a226d81ae3095fa4916.-1 {}] backup.HFileArchiver(363): Archiving compacted files. 2024-12-16T17:57:55,520 DEBUG [HFileArchiver-6 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/9edd584a765d2a226d81ae3095fa4916/A/ededcf6667c544aba64455be628d26c4 to hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/archive/data/default/TestAcidGuarantees/9edd584a765d2a226d81ae3095fa4916/A/ededcf6667c544aba64455be628d26c4 2024-12-16T17:57:55,520 DEBUG [HFileArchiver-7 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/9edd584a765d2a226d81ae3095fa4916/A/d8c330f2b8b146708621caf329c5f1db to hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/archive/data/default/TestAcidGuarantees/9edd584a765d2a226d81ae3095fa4916/A/d8c330f2b8b146708621caf329c5f1db 2024-12-16T17:57:55,520 DEBUG [HFileArchiver-4 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/9edd584a765d2a226d81ae3095fa4916/A/cb3769fb7a554a47ad0a6ee7bb9aa3dd to hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/archive/data/default/TestAcidGuarantees/9edd584a765d2a226d81ae3095fa4916/A/cb3769fb7a554a47ad0a6ee7bb9aa3dd 2024-12-16T17:57:55,520 DEBUG [HFileArchiver-3 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/9edd584a765d2a226d81ae3095fa4916/A/aebe984fc5ec4a5fbbd4060edacc4bdc to hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/archive/data/default/TestAcidGuarantees/9edd584a765d2a226d81ae3095fa4916/A/aebe984fc5ec4a5fbbd4060edacc4bdc 2024-12-16T17:57:55,521 DEBUG [HFileArchiver-1 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/9edd584a765d2a226d81ae3095fa4916/A/ff0e0852c9ef43a1b0a86fe9a7998e00 to hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/archive/data/default/TestAcidGuarantees/9edd584a765d2a226d81ae3095fa4916/A/ff0e0852c9ef43a1b0a86fe9a7998e00 2024-12-16T17:57:55,521 DEBUG [HFileArchiver-2 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/9edd584a765d2a226d81ae3095fa4916/A/86cdfcd5b50544368b721664d3695a24 to hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/archive/data/default/TestAcidGuarantees/9edd584a765d2a226d81ae3095fa4916/A/86cdfcd5b50544368b721664d3695a24 2024-12-16T17:57:55,521 DEBUG [HFileArchiver-8 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/9edd584a765d2a226d81ae3095fa4916/A/090efc5c96494f04a3a3b0c03ada981a to 
hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/archive/data/default/TestAcidGuarantees/9edd584a765d2a226d81ae3095fa4916/A/090efc5c96494f04a3a3b0c03ada981a 2024-12-16T17:57:55,521 DEBUG [HFileArchiver-5 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/9edd584a765d2a226d81ae3095fa4916/A/0b4ad3bd36414bdf9dea359392699961 to hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/archive/data/default/TestAcidGuarantees/9edd584a765d2a226d81ae3095fa4916/A/0b4ad3bd36414bdf9dea359392699961 2024-12-16T17:57:55,522 DEBUG [HFileArchiver-1 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/9edd584a765d2a226d81ae3095fa4916/A/6289fec02d764918a743798704c5d3e6 to hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/archive/data/default/TestAcidGuarantees/9edd584a765d2a226d81ae3095fa4916/A/6289fec02d764918a743798704c5d3e6 2024-12-16T17:57:55,522 DEBUG [HFileArchiver-6 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/9edd584a765d2a226d81ae3095fa4916/A/d9efa881ce15457a830a3cbb6f569400 to hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/archive/data/default/TestAcidGuarantees/9edd584a765d2a226d81ae3095fa4916/A/d9efa881ce15457a830a3cbb6f569400 2024-12-16T17:57:55,522 DEBUG [HFileArchiver-7 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/9edd584a765d2a226d81ae3095fa4916/A/0d26f1108c444d76b2ff5bb43c127458 to hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/archive/data/default/TestAcidGuarantees/9edd584a765d2a226d81ae3095fa4916/A/0d26f1108c444d76b2ff5bb43c127458 2024-12-16T17:57:55,522 DEBUG [HFileArchiver-4 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/9edd584a765d2a226d81ae3095fa4916/A/5147d5cddff041fab1c52700c0422ef0 to hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/archive/data/default/TestAcidGuarantees/9edd584a765d2a226d81ae3095fa4916/A/5147d5cddff041fab1c52700c0422ef0 2024-12-16T17:57:55,522 DEBUG [HFileArchiver-2 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/9edd584a765d2a226d81ae3095fa4916/A/df12539b3ad0476c960a5df2afcc043e to hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/archive/data/default/TestAcidGuarantees/9edd584a765d2a226d81ae3095fa4916/A/df12539b3ad0476c960a5df2afcc043e 2024-12-16T17:57:55,522 DEBUG [HFileArchiver-3 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/9edd584a765d2a226d81ae3095fa4916/A/b2c80ed673994ed98011715ef421612c to 
hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/archive/data/default/TestAcidGuarantees/9edd584a765d2a226d81ae3095fa4916/A/b2c80ed673994ed98011715ef421612c 2024-12-16T17:57:55,522 DEBUG [HFileArchiver-8 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/9edd584a765d2a226d81ae3095fa4916/A/edc8531fd19841ce88fbd9d9d84bffed to hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/archive/data/default/TestAcidGuarantees/9edd584a765d2a226d81ae3095fa4916/A/edc8531fd19841ce88fbd9d9d84bffed 2024-12-16T17:57:55,523 DEBUG [HFileArchiver-5 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/9edd584a765d2a226d81ae3095fa4916/A/9cb89fd772694309b6e1711fbeca756f to hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/archive/data/default/TestAcidGuarantees/9edd584a765d2a226d81ae3095fa4916/A/9cb89fd772694309b6e1711fbeca756f 2024-12-16T17:57:55,523 DEBUG [HFileArchiver-2 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/9edd584a765d2a226d81ae3095fa4916/A/3b69673bc32244ac9bbc02d3e38837f9 to hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/archive/data/default/TestAcidGuarantees/9edd584a765d2a226d81ae3095fa4916/A/3b69673bc32244ac9bbc02d3e38837f9 2024-12-16T17:57:55,523 DEBUG [HFileArchiver-1 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/9edd584a765d2a226d81ae3095fa4916/A/45dc969387eb488b94a27699e9d0ec80 to hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/archive/data/default/TestAcidGuarantees/9edd584a765d2a226d81ae3095fa4916/A/45dc969387eb488b94a27699e9d0ec80 2024-12-16T17:57:55,524 DEBUG [HFileArchiver-4 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/9edd584a765d2a226d81ae3095fa4916/A/163f14959f6448d7b0df2252e5a9d2ea to hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/archive/data/default/TestAcidGuarantees/9edd584a765d2a226d81ae3095fa4916/A/163f14959f6448d7b0df2252e5a9d2ea 2024-12-16T17:57:55,524 DEBUG [HFileArchiver-3 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/9edd584a765d2a226d81ae3095fa4916/A/e9748a47b8b54251beadee6f5240470f to hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/archive/data/default/TestAcidGuarantees/9edd584a765d2a226d81ae3095fa4916/A/e9748a47b8b54251beadee6f5240470f 2024-12-16T17:57:55,524 DEBUG [HFileArchiver-7 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/9edd584a765d2a226d81ae3095fa4916/A/f854a9e208b941ccafc3f0cb3cad46f4 to 
hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/archive/data/default/TestAcidGuarantees/9edd584a765d2a226d81ae3095fa4916/A/f854a9e208b941ccafc3f0cb3cad46f4 2024-12-16T17:57:55,524 DEBUG [HFileArchiver-5 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/9edd584a765d2a226d81ae3095fa4916/A/9a2426b80f9446ddac577f0165208c42 to hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/archive/data/default/TestAcidGuarantees/9edd584a765d2a226d81ae3095fa4916/A/9a2426b80f9446ddac577f0165208c42 2024-12-16T17:57:55,524 DEBUG [HFileArchiver-6 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/9edd584a765d2a226d81ae3095fa4916/A/b11d7a7e1143462aa782022df054b4c8 to hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/archive/data/default/TestAcidGuarantees/9edd584a765d2a226d81ae3095fa4916/A/b11d7a7e1143462aa782022df054b4c8 2024-12-16T17:57:55,525 DEBUG [HFileArchiver-8 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/9edd584a765d2a226d81ae3095fa4916/A/7b27faf4cdd640a6a30ef945d439bfe1 to hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/archive/data/default/TestAcidGuarantees/9edd584a765d2a226d81ae3095fa4916/A/7b27faf4cdd640a6a30ef945d439bfe1 2024-12-16T17:57:55,525 DEBUG [HFileArchiver-2 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/9edd584a765d2a226d81ae3095fa4916/A/d987114fae5148eba8c369b0d9e31847 to hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/archive/data/default/TestAcidGuarantees/9edd584a765d2a226d81ae3095fa4916/A/d987114fae5148eba8c369b0d9e31847 2024-12-16T17:57:55,526 DEBUG [HFileArchiver-1 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/9edd584a765d2a226d81ae3095fa4916/A/d00f32e57edb4ad88375f6de7b405f7b to hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/archive/data/default/TestAcidGuarantees/9edd584a765d2a226d81ae3095fa4916/A/d00f32e57edb4ad88375f6de7b405f7b 2024-12-16T17:57:55,526 DEBUG [HFileArchiver-4 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/9edd584a765d2a226d81ae3095fa4916/A/c30b1fbfbdd54b768c0879ce8f661472 to hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/archive/data/default/TestAcidGuarantees/9edd584a765d2a226d81ae3095fa4916/A/c30b1fbfbdd54b768c0879ce8f661472 2024-12-16T17:57:55,526 DEBUG [HFileArchiver-3 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/9edd584a765d2a226d81ae3095fa4916/A/eb24edd01c624651b211233fd9c49596 to 
hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/archive/data/default/TestAcidGuarantees/9edd584a765d2a226d81ae3095fa4916/A/eb24edd01c624651b211233fd9c49596 2024-12-16T17:57:55,526 DEBUG [HFileArchiver-5 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/9edd584a765d2a226d81ae3095fa4916/A/736b467ac0754b0b93586a33185cea59 to hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/archive/data/default/TestAcidGuarantees/9edd584a765d2a226d81ae3095fa4916/A/736b467ac0754b0b93586a33185cea59 2024-12-16T17:57:55,526 DEBUG [HFileArchiver-6 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/9edd584a765d2a226d81ae3095fa4916/A/c0c9239186c6495bad41b07dfb8f5342 to hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/archive/data/default/TestAcidGuarantees/9edd584a765d2a226d81ae3095fa4916/A/c0c9239186c6495bad41b07dfb8f5342 2024-12-16T17:57:55,526 DEBUG [HFileArchiver-7 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/9edd584a765d2a226d81ae3095fa4916/A/f609b536080347aa8c03be0b56810506 to hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/archive/data/default/TestAcidGuarantees/9edd584a765d2a226d81ae3095fa4916/A/f609b536080347aa8c03be0b56810506 2024-12-16T17:57:55,527 DEBUG [StoreCloser-TestAcidGuarantees,,1734371848597.9edd584a765d2a226d81ae3095fa4916.-1 {}] regionserver.HStore(2316): Moving the files [hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/9edd584a765d2a226d81ae3095fa4916/B/2e84d862af4642caa0fea02e3ed71b5f, hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/9edd584a765d2a226d81ae3095fa4916/B/e3db51af415c4c82a3b564f36ed6fc52, hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/9edd584a765d2a226d81ae3095fa4916/B/69b84cb11a014304979fbdf85cf3667f, hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/9edd584a765d2a226d81ae3095fa4916/B/390a157c70f143e492441f1ce599e703, hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/9edd584a765d2a226d81ae3095fa4916/B/d42e6255ca504492ad3598ef2dc7482e, hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/9edd584a765d2a226d81ae3095fa4916/B/29070dccd7c6430a86162bd0c570a598, hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/9edd584a765d2a226d81ae3095fa4916/B/7a1d7cb97b6f410ba0602eb41f8ba647, hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/9edd584a765d2a226d81ae3095fa4916/B/7d58f837bb99457a8828c40f6abcb04d, hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/9edd584a765d2a226d81ae3095fa4916/B/ccb92aac72354daebd36f8c213020d6a, 
hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/9edd584a765d2a226d81ae3095fa4916/B/e6d16fe9711740b89e9195b91fe54650, hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/9edd584a765d2a226d81ae3095fa4916/B/83a748fc859349c3a759c32466de4793, hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/9edd584a765d2a226d81ae3095fa4916/B/4d4f4c8649454fe49b6563f5e0a47a70, hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/9edd584a765d2a226d81ae3095fa4916/B/798b1d134cbc4b8aa92704ae49cd3d3f, hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/9edd584a765d2a226d81ae3095fa4916/B/ce3a0ccaa8f44d8c8e9fcff7dd8e0353, hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/9edd584a765d2a226d81ae3095fa4916/B/a84e3508a9b941889c19de3f73369ebf, hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/9edd584a765d2a226d81ae3095fa4916/B/ee0463a44e2d43c09e26a508f369adc3, hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/9edd584a765d2a226d81ae3095fa4916/B/6bdc432834464006827d507e3ca887a6, hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/9edd584a765d2a226d81ae3095fa4916/B/9553250f5ffd4b1a828dee0e8e6842b7, hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/9edd584a765d2a226d81ae3095fa4916/B/8fbe6a980a704242825fab6bfa36f48b, hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/9edd584a765d2a226d81ae3095fa4916/B/76d6710c5f0146baa12348d2788626a8, hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/9edd584a765d2a226d81ae3095fa4916/B/7df85e88d0f7442284e56ab807a250d2, hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/9edd584a765d2a226d81ae3095fa4916/B/f767d2bea540449ba3f4c73b747c58ab, hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/9edd584a765d2a226d81ae3095fa4916/B/844b3937a1c543009d17f7f1f7369509, hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/9edd584a765d2a226d81ae3095fa4916/B/c511b37367cf4bbe982f3927cd73511e, hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/9edd584a765d2a226d81ae3095fa4916/B/482cf643da9b4f0eaf5cd44fb2bf9aa7, hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/9edd584a765d2a226d81ae3095fa4916/B/598dba38cc67414bb2bc2a0f6de7b333, hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/9edd584a765d2a226d81ae3095fa4916/B/320b6bc898a54e5996b38d884cf046e6, hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/9edd584a765d2a226d81ae3095fa4916/B/8c86dbe3469645c09f220c097d286dc1, 
hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/9edd584a765d2a226d81ae3095fa4916/B/77b0b2b602954d7584fcb2fd70ea7de2, hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/9edd584a765d2a226d81ae3095fa4916/B/5453782c29284a68b09cd7a38660de44, hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/9edd584a765d2a226d81ae3095fa4916/B/a461de328958426d8915274c668ce91c] to archive 2024-12-16T17:57:55,528 DEBUG [StoreCloser-TestAcidGuarantees,,1734371848597.9edd584a765d2a226d81ae3095fa4916.-1 {}] backup.HFileArchiver(363): Archiving compacted files. 2024-12-16T17:57:55,530 DEBUG [HFileArchiver-2 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/9edd584a765d2a226d81ae3095fa4916/B/e3db51af415c4c82a3b564f36ed6fc52 to hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/archive/data/default/TestAcidGuarantees/9edd584a765d2a226d81ae3095fa4916/B/e3db51af415c4c82a3b564f36ed6fc52 2024-12-16T17:57:55,531 DEBUG [HFileArchiver-3 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/9edd584a765d2a226d81ae3095fa4916/B/d42e6255ca504492ad3598ef2dc7482e to hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/archive/data/default/TestAcidGuarantees/9edd584a765d2a226d81ae3095fa4916/B/d42e6255ca504492ad3598ef2dc7482e 2024-12-16T17:57:55,531 DEBUG [HFileArchiver-8 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/9edd584a765d2a226d81ae3095fa4916/B/2e84d862af4642caa0fea02e3ed71b5f to hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/archive/data/default/TestAcidGuarantees/9edd584a765d2a226d81ae3095fa4916/B/2e84d862af4642caa0fea02e3ed71b5f 2024-12-16T17:57:55,531 DEBUG [HFileArchiver-4 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/9edd584a765d2a226d81ae3095fa4916/B/390a157c70f143e492441f1ce599e703 to hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/archive/data/default/TestAcidGuarantees/9edd584a765d2a226d81ae3095fa4916/B/390a157c70f143e492441f1ce599e703 2024-12-16T17:57:55,531 DEBUG [HFileArchiver-5 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/9edd584a765d2a226d81ae3095fa4916/B/29070dccd7c6430a86162bd0c570a598 to hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/archive/data/default/TestAcidGuarantees/9edd584a765d2a226d81ae3095fa4916/B/29070dccd7c6430a86162bd0c570a598 2024-12-16T17:57:55,531 DEBUG [HFileArchiver-1 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/9edd584a765d2a226d81ae3095fa4916/B/69b84cb11a014304979fbdf85cf3667f to 
hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/archive/data/default/TestAcidGuarantees/9edd584a765d2a226d81ae3095fa4916/B/69b84cb11a014304979fbdf85cf3667f 2024-12-16T17:57:55,531 DEBUG [HFileArchiver-7 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/9edd584a765d2a226d81ae3095fa4916/B/7d58f837bb99457a8828c40f6abcb04d to hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/archive/data/default/TestAcidGuarantees/9edd584a765d2a226d81ae3095fa4916/B/7d58f837bb99457a8828c40f6abcb04d 2024-12-16T17:57:55,531 DEBUG [HFileArchiver-6 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/9edd584a765d2a226d81ae3095fa4916/B/7a1d7cb97b6f410ba0602eb41f8ba647 to hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/archive/data/default/TestAcidGuarantees/9edd584a765d2a226d81ae3095fa4916/B/7a1d7cb97b6f410ba0602eb41f8ba647 2024-12-16T17:57:55,531 DEBUG [HFileArchiver-2 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/9edd584a765d2a226d81ae3095fa4916/B/ccb92aac72354daebd36f8c213020d6a to hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/archive/data/default/TestAcidGuarantees/9edd584a765d2a226d81ae3095fa4916/B/ccb92aac72354daebd36f8c213020d6a 2024-12-16T17:57:55,532 DEBUG [HFileArchiver-3 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/9edd584a765d2a226d81ae3095fa4916/B/e6d16fe9711740b89e9195b91fe54650 to hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/archive/data/default/TestAcidGuarantees/9edd584a765d2a226d81ae3095fa4916/B/e6d16fe9711740b89e9195b91fe54650 2024-12-16T17:57:55,532 DEBUG [HFileArchiver-8 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/9edd584a765d2a226d81ae3095fa4916/B/83a748fc859349c3a759c32466de4793 to hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/archive/data/default/TestAcidGuarantees/9edd584a765d2a226d81ae3095fa4916/B/83a748fc859349c3a759c32466de4793 2024-12-16T17:57:55,532 DEBUG [HFileArchiver-1 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/9edd584a765d2a226d81ae3095fa4916/B/ce3a0ccaa8f44d8c8e9fcff7dd8e0353 to hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/archive/data/default/TestAcidGuarantees/9edd584a765d2a226d81ae3095fa4916/B/ce3a0ccaa8f44d8c8e9fcff7dd8e0353 2024-12-16T17:57:55,532 DEBUG [HFileArchiver-5 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/9edd584a765d2a226d81ae3095fa4916/B/4d4f4c8649454fe49b6563f5e0a47a70 to 
hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/archive/data/default/TestAcidGuarantees/9edd584a765d2a226d81ae3095fa4916/B/4d4f4c8649454fe49b6563f5e0a47a70 2024-12-16T17:57:55,532 DEBUG [HFileArchiver-7 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/9edd584a765d2a226d81ae3095fa4916/B/a84e3508a9b941889c19de3f73369ebf to hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/archive/data/default/TestAcidGuarantees/9edd584a765d2a226d81ae3095fa4916/B/a84e3508a9b941889c19de3f73369ebf 2024-12-16T17:57:55,532 DEBUG [HFileArchiver-4 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/9edd584a765d2a226d81ae3095fa4916/B/798b1d134cbc4b8aa92704ae49cd3d3f to hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/archive/data/default/TestAcidGuarantees/9edd584a765d2a226d81ae3095fa4916/B/798b1d134cbc4b8aa92704ae49cd3d3f 2024-12-16T17:57:55,533 DEBUG [HFileArchiver-6 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/9edd584a765d2a226d81ae3095fa4916/B/ee0463a44e2d43c09e26a508f369adc3 to hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/archive/data/default/TestAcidGuarantees/9edd584a765d2a226d81ae3095fa4916/B/ee0463a44e2d43c09e26a508f369adc3 2024-12-16T17:57:55,533 DEBUG [HFileArchiver-2 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/9edd584a765d2a226d81ae3095fa4916/B/6bdc432834464006827d507e3ca887a6 to hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/archive/data/default/TestAcidGuarantees/9edd584a765d2a226d81ae3095fa4916/B/6bdc432834464006827d507e3ca887a6 2024-12-16T17:57:55,533 DEBUG [HFileArchiver-3 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/9edd584a765d2a226d81ae3095fa4916/B/9553250f5ffd4b1a828dee0e8e6842b7 to hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/archive/data/default/TestAcidGuarantees/9edd584a765d2a226d81ae3095fa4916/B/9553250f5ffd4b1a828dee0e8e6842b7 2024-12-16T17:57:55,534 DEBUG [HFileArchiver-1 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/9edd584a765d2a226d81ae3095fa4916/B/76d6710c5f0146baa12348d2788626a8 to hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/archive/data/default/TestAcidGuarantees/9edd584a765d2a226d81ae3095fa4916/B/76d6710c5f0146baa12348d2788626a8 2024-12-16T17:57:55,534 DEBUG [HFileArchiver-7 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/9edd584a765d2a226d81ae3095fa4916/B/f767d2bea540449ba3f4c73b747c58ab to 
hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/archive/data/default/TestAcidGuarantees/9edd584a765d2a226d81ae3095fa4916/B/f767d2bea540449ba3f4c73b747c58ab 2024-12-16T17:57:55,534 DEBUG [HFileArchiver-8 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/9edd584a765d2a226d81ae3095fa4916/B/8fbe6a980a704242825fab6bfa36f48b to hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/archive/data/default/TestAcidGuarantees/9edd584a765d2a226d81ae3095fa4916/B/8fbe6a980a704242825fab6bfa36f48b 2024-12-16T17:57:55,534 DEBUG [HFileArchiver-4 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/9edd584a765d2a226d81ae3095fa4916/B/844b3937a1c543009d17f7f1f7369509 to hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/archive/data/default/TestAcidGuarantees/9edd584a765d2a226d81ae3095fa4916/B/844b3937a1c543009d17f7f1f7369509 2024-12-16T17:57:55,534 DEBUG [HFileArchiver-5 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/9edd584a765d2a226d81ae3095fa4916/B/7df85e88d0f7442284e56ab807a250d2 to hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/archive/data/default/TestAcidGuarantees/9edd584a765d2a226d81ae3095fa4916/B/7df85e88d0f7442284e56ab807a250d2 2024-12-16T17:57:55,534 DEBUG [HFileArchiver-2 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/9edd584a765d2a226d81ae3095fa4916/B/482cf643da9b4f0eaf5cd44fb2bf9aa7 to hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/archive/data/default/TestAcidGuarantees/9edd584a765d2a226d81ae3095fa4916/B/482cf643da9b4f0eaf5cd44fb2bf9aa7 2024-12-16T17:57:55,534 DEBUG [HFileArchiver-6 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/9edd584a765d2a226d81ae3095fa4916/B/c511b37367cf4bbe982f3927cd73511e to hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/archive/data/default/TestAcidGuarantees/9edd584a765d2a226d81ae3095fa4916/B/c511b37367cf4bbe982f3927cd73511e 2024-12-16T17:57:55,535 DEBUG [HFileArchiver-3 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/9edd584a765d2a226d81ae3095fa4916/B/598dba38cc67414bb2bc2a0f6de7b333 to hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/archive/data/default/TestAcidGuarantees/9edd584a765d2a226d81ae3095fa4916/B/598dba38cc67414bb2bc2a0f6de7b333 2024-12-16T17:57:55,535 DEBUG [HFileArchiver-1 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/9edd584a765d2a226d81ae3095fa4916/B/320b6bc898a54e5996b38d884cf046e6 to 
hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/archive/data/default/TestAcidGuarantees/9edd584a765d2a226d81ae3095fa4916/B/320b6bc898a54e5996b38d884cf046e6 2024-12-16T17:57:55,535 DEBUG [HFileArchiver-7 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/9edd584a765d2a226d81ae3095fa4916/B/8c86dbe3469645c09f220c097d286dc1 to hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/archive/data/default/TestAcidGuarantees/9edd584a765d2a226d81ae3095fa4916/B/8c86dbe3469645c09f220c097d286dc1 2024-12-16T17:57:55,535 DEBUG [HFileArchiver-8 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/9edd584a765d2a226d81ae3095fa4916/B/77b0b2b602954d7584fcb2fd70ea7de2 to hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/archive/data/default/TestAcidGuarantees/9edd584a765d2a226d81ae3095fa4916/B/77b0b2b602954d7584fcb2fd70ea7de2 2024-12-16T17:57:55,535 DEBUG [HFileArchiver-4 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/9edd584a765d2a226d81ae3095fa4916/B/5453782c29284a68b09cd7a38660de44 to hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/archive/data/default/TestAcidGuarantees/9edd584a765d2a226d81ae3095fa4916/B/5453782c29284a68b09cd7a38660de44 2024-12-16T17:57:55,535 DEBUG [HFileArchiver-5 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/9edd584a765d2a226d81ae3095fa4916/B/a461de328958426d8915274c668ce91c to hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/archive/data/default/TestAcidGuarantees/9edd584a765d2a226d81ae3095fa4916/B/a461de328958426d8915274c668ce91c 2024-12-16T17:57:55,536 DEBUG [StoreCloser-TestAcidGuarantees,,1734371848597.9edd584a765d2a226d81ae3095fa4916.-1 {}] regionserver.HStore(2316): Moving the files [hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/9edd584a765d2a226d81ae3095fa4916/C/b22197d3e7aa43c787bfe959c47580bf, hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/9edd584a765d2a226d81ae3095fa4916/C/ee7ec8f49a99418e9772925db6304cc2, hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/9edd584a765d2a226d81ae3095fa4916/C/70e586f8731f4726a8572a43ae6758eb, hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/9edd584a765d2a226d81ae3095fa4916/C/70d106120a764da5a6f4746be14078fb, hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/9edd584a765d2a226d81ae3095fa4916/C/d695ad09dd7f4b20a8c737285fdf6989, hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/9edd584a765d2a226d81ae3095fa4916/C/0b729069623444e4a44757eb00b79c68, 
hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/9edd584a765d2a226d81ae3095fa4916/C/792b6d3bdea642cb966aec965256d2c1, hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/9edd584a765d2a226d81ae3095fa4916/C/5788755821084a2091b31a6b1564fe58, hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/9edd584a765d2a226d81ae3095fa4916/C/04beb5edf4fb48d9b2c10ca861ad8885, hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/9edd584a765d2a226d81ae3095fa4916/C/918470d694f74f93bd48c746f6a8aa1b, hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/9edd584a765d2a226d81ae3095fa4916/C/5606ff8b0bff4b6ba9335f56cfb00bc3, hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/9edd584a765d2a226d81ae3095fa4916/C/5a54cfce3eb040778deda37272c5a79b, hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/9edd584a765d2a226d81ae3095fa4916/C/389f6513679c4c0ba89e791b25ceb8bf, hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/9edd584a765d2a226d81ae3095fa4916/C/595f76a63fda4ee3940e2a21d1fcc3e0, hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/9edd584a765d2a226d81ae3095fa4916/C/da4c7dc3266c4eb298b341abfcb7d10c, hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/9edd584a765d2a226d81ae3095fa4916/C/9672b275fd17443a9333307601bcf329, hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/9edd584a765d2a226d81ae3095fa4916/C/fde1ef3e633045e8be14cc3d0e2c6196, hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/9edd584a765d2a226d81ae3095fa4916/C/095a780f6e0c48b4a905ed5c12f1da55, hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/9edd584a765d2a226d81ae3095fa4916/C/6b41f05d800749ac8b0f822120c2bc6f, hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/9edd584a765d2a226d81ae3095fa4916/C/aec5289a7b48477f9919f0da3e6523db, hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/9edd584a765d2a226d81ae3095fa4916/C/35bfb5128a414adc9736480f8dd7b762, hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/9edd584a765d2a226d81ae3095fa4916/C/3b0ca63b404f441ba0092f577479dd1a, hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/9edd584a765d2a226d81ae3095fa4916/C/739d7f17b6ec46b0bd0022544ba0a117, hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/9edd584a765d2a226d81ae3095fa4916/C/9d0acdf8e57f416c8a8a682de34b59a9, hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/9edd584a765d2a226d81ae3095fa4916/C/26fbdc55e7f7400c90eda639d12f6796, 
hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/9edd584a765d2a226d81ae3095fa4916/C/360ffb32fb814e7a826ea82632384f7c, hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/9edd584a765d2a226d81ae3095fa4916/C/b4481327c4464533ae1db1bef32d992b, hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/9edd584a765d2a226d81ae3095fa4916/C/1f0688c42e8a4994aede07484759022b, hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/9edd584a765d2a226d81ae3095fa4916/C/1e141ca14e7345d895c239b13b126c01, hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/9edd584a765d2a226d81ae3095fa4916/C/e045a9b92f6840c78251007a23e9ae48, hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/9edd584a765d2a226d81ae3095fa4916/C/a1cf6a349c56478aa977a54fd6a979e0] to archive 2024-12-16T17:57:55,537 DEBUG [StoreCloser-TestAcidGuarantees,,1734371848597.9edd584a765d2a226d81ae3095fa4916.-1 {}] backup.HFileArchiver(363): Archiving compacted files. 2024-12-16T17:57:55,538 DEBUG [HFileArchiver-3 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/9edd584a765d2a226d81ae3095fa4916/C/70e586f8731f4726a8572a43ae6758eb to hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/archive/data/default/TestAcidGuarantees/9edd584a765d2a226d81ae3095fa4916/C/70e586f8731f4726a8572a43ae6758eb 2024-12-16T17:57:55,538 DEBUG [HFileArchiver-2 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/9edd584a765d2a226d81ae3095fa4916/C/b22197d3e7aa43c787bfe959c47580bf to hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/archive/data/default/TestAcidGuarantees/9edd584a765d2a226d81ae3095fa4916/C/b22197d3e7aa43c787bfe959c47580bf 2024-12-16T17:57:55,539 DEBUG [HFileArchiver-7 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/9edd584a765d2a226d81ae3095fa4916/C/d695ad09dd7f4b20a8c737285fdf6989 to hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/archive/data/default/TestAcidGuarantees/9edd584a765d2a226d81ae3095fa4916/C/d695ad09dd7f4b20a8c737285fdf6989 2024-12-16T17:57:55,539 DEBUG [HFileArchiver-8 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/9edd584a765d2a226d81ae3095fa4916/C/0b729069623444e4a44757eb00b79c68 to hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/archive/data/default/TestAcidGuarantees/9edd584a765d2a226d81ae3095fa4916/C/0b729069623444e4a44757eb00b79c68 2024-12-16T17:57:55,539 DEBUG [HFileArchiver-6 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/9edd584a765d2a226d81ae3095fa4916/C/ee7ec8f49a99418e9772925db6304cc2 to 
hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/archive/data/default/TestAcidGuarantees/9edd584a765d2a226d81ae3095fa4916/C/ee7ec8f49a99418e9772925db6304cc2 2024-12-16T17:57:55,539 DEBUG [HFileArchiver-1 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/9edd584a765d2a226d81ae3095fa4916/C/70d106120a764da5a6f4746be14078fb to hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/archive/data/default/TestAcidGuarantees/9edd584a765d2a226d81ae3095fa4916/C/70d106120a764da5a6f4746be14078fb 2024-12-16T17:57:55,539 DEBUG [HFileArchiver-5 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/9edd584a765d2a226d81ae3095fa4916/C/5788755821084a2091b31a6b1564fe58 to hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/archive/data/default/TestAcidGuarantees/9edd584a765d2a226d81ae3095fa4916/C/5788755821084a2091b31a6b1564fe58 2024-12-16T17:57:55,539 DEBUG [HFileArchiver-4 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/9edd584a765d2a226d81ae3095fa4916/C/792b6d3bdea642cb966aec965256d2c1 to hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/archive/data/default/TestAcidGuarantees/9edd584a765d2a226d81ae3095fa4916/C/792b6d3bdea642cb966aec965256d2c1 2024-12-16T17:57:55,540 DEBUG [HFileArchiver-3 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/9edd584a765d2a226d81ae3095fa4916/C/04beb5edf4fb48d9b2c10ca861ad8885 to hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/archive/data/default/TestAcidGuarantees/9edd584a765d2a226d81ae3095fa4916/C/04beb5edf4fb48d9b2c10ca861ad8885 2024-12-16T17:57:55,540 DEBUG [HFileArchiver-2 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/9edd584a765d2a226d81ae3095fa4916/C/918470d694f74f93bd48c746f6a8aa1b to hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/archive/data/default/TestAcidGuarantees/9edd584a765d2a226d81ae3095fa4916/C/918470d694f74f93bd48c746f6a8aa1b 2024-12-16T17:57:55,540 DEBUG [HFileArchiver-5 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/9edd584a765d2a226d81ae3095fa4916/C/da4c7dc3266c4eb298b341abfcb7d10c to hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/archive/data/default/TestAcidGuarantees/9edd584a765d2a226d81ae3095fa4916/C/da4c7dc3266c4eb298b341abfcb7d10c 2024-12-16T17:57:55,540 DEBUG [HFileArchiver-7 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/9edd584a765d2a226d81ae3095fa4916/C/5606ff8b0bff4b6ba9335f56cfb00bc3 to 
hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/archive/data/default/TestAcidGuarantees/9edd584a765d2a226d81ae3095fa4916/C/5606ff8b0bff4b6ba9335f56cfb00bc3 2024-12-16T17:57:55,540 DEBUG [HFileArchiver-6 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/9edd584a765d2a226d81ae3095fa4916/C/595f76a63fda4ee3940e2a21d1fcc3e0 to hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/archive/data/default/TestAcidGuarantees/9edd584a765d2a226d81ae3095fa4916/C/595f76a63fda4ee3940e2a21d1fcc3e0 2024-12-16T17:57:55,540 DEBUG [HFileArchiver-1 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/9edd584a765d2a226d81ae3095fa4916/C/389f6513679c4c0ba89e791b25ceb8bf to hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/archive/data/default/TestAcidGuarantees/9edd584a765d2a226d81ae3095fa4916/C/389f6513679c4c0ba89e791b25ceb8bf 2024-12-16T17:57:55,540 DEBUG [HFileArchiver-8 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/9edd584a765d2a226d81ae3095fa4916/C/5a54cfce3eb040778deda37272c5a79b to hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/archive/data/default/TestAcidGuarantees/9edd584a765d2a226d81ae3095fa4916/C/5a54cfce3eb040778deda37272c5a79b 2024-12-16T17:57:55,540 DEBUG [HFileArchiver-4 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/9edd584a765d2a226d81ae3095fa4916/C/9672b275fd17443a9333307601bcf329 to hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/archive/data/default/TestAcidGuarantees/9edd584a765d2a226d81ae3095fa4916/C/9672b275fd17443a9333307601bcf329 2024-12-16T17:57:55,541 DEBUG [HFileArchiver-2 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/9edd584a765d2a226d81ae3095fa4916/C/095a780f6e0c48b4a905ed5c12f1da55 to hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/archive/data/default/TestAcidGuarantees/9edd584a765d2a226d81ae3095fa4916/C/095a780f6e0c48b4a905ed5c12f1da55 2024-12-16T17:57:55,542 DEBUG [HFileArchiver-7 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/9edd584a765d2a226d81ae3095fa4916/C/aec5289a7b48477f9919f0da3e6523db to hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/archive/data/default/TestAcidGuarantees/9edd584a765d2a226d81ae3095fa4916/C/aec5289a7b48477f9919f0da3e6523db 2024-12-16T17:57:55,542 DEBUG [HFileArchiver-3 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/9edd584a765d2a226d81ae3095fa4916/C/fde1ef3e633045e8be14cc3d0e2c6196 to 
hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/archive/data/default/TestAcidGuarantees/9edd584a765d2a226d81ae3095fa4916/C/fde1ef3e633045e8be14cc3d0e2c6196 2024-12-16T17:57:55,542 DEBUG [HFileArchiver-5 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/9edd584a765d2a226d81ae3095fa4916/C/6b41f05d800749ac8b0f822120c2bc6f to hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/archive/data/default/TestAcidGuarantees/9edd584a765d2a226d81ae3095fa4916/C/6b41f05d800749ac8b0f822120c2bc6f 2024-12-16T17:57:55,542 DEBUG [HFileArchiver-1 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/9edd584a765d2a226d81ae3095fa4916/C/3b0ca63b404f441ba0092f577479dd1a to hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/archive/data/default/TestAcidGuarantees/9edd584a765d2a226d81ae3095fa4916/C/3b0ca63b404f441ba0092f577479dd1a 2024-12-16T17:57:55,542 DEBUG [HFileArchiver-4 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/9edd584a765d2a226d81ae3095fa4916/C/9d0acdf8e57f416c8a8a682de34b59a9 to hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/archive/data/default/TestAcidGuarantees/9edd584a765d2a226d81ae3095fa4916/C/9d0acdf8e57f416c8a8a682de34b59a9 2024-12-16T17:57:55,542 DEBUG [HFileArchiver-8 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/9edd584a765d2a226d81ae3095fa4916/C/739d7f17b6ec46b0bd0022544ba0a117 to hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/archive/data/default/TestAcidGuarantees/9edd584a765d2a226d81ae3095fa4916/C/739d7f17b6ec46b0bd0022544ba0a117 2024-12-16T17:57:55,542 DEBUG [HFileArchiver-6 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/9edd584a765d2a226d81ae3095fa4916/C/35bfb5128a414adc9736480f8dd7b762 to hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/archive/data/default/TestAcidGuarantees/9edd584a765d2a226d81ae3095fa4916/C/35bfb5128a414adc9736480f8dd7b762 2024-12-16T17:57:55,543 DEBUG [HFileArchiver-2 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/9edd584a765d2a226d81ae3095fa4916/C/26fbdc55e7f7400c90eda639d12f6796 to hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/archive/data/default/TestAcidGuarantees/9edd584a765d2a226d81ae3095fa4916/C/26fbdc55e7f7400c90eda639d12f6796 2024-12-16T17:57:55,543 DEBUG [HFileArchiver-7 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/9edd584a765d2a226d81ae3095fa4916/C/360ffb32fb814e7a826ea82632384f7c to 
hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/archive/data/default/TestAcidGuarantees/9edd584a765d2a226d81ae3095fa4916/C/360ffb32fb814e7a826ea82632384f7c 2024-12-16T17:57:55,543 DEBUG [HFileArchiver-3 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/9edd584a765d2a226d81ae3095fa4916/C/b4481327c4464533ae1db1bef32d992b to hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/archive/data/default/TestAcidGuarantees/9edd584a765d2a226d81ae3095fa4916/C/b4481327c4464533ae1db1bef32d992b 2024-12-16T17:57:55,544 DEBUG [HFileArchiver-5 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/9edd584a765d2a226d81ae3095fa4916/C/1f0688c42e8a4994aede07484759022b to hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/archive/data/default/TestAcidGuarantees/9edd584a765d2a226d81ae3095fa4916/C/1f0688c42e8a4994aede07484759022b 2024-12-16T17:57:55,544 DEBUG [HFileArchiver-1 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/9edd584a765d2a226d81ae3095fa4916/C/1e141ca14e7345d895c239b13b126c01 to hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/archive/data/default/TestAcidGuarantees/9edd584a765d2a226d81ae3095fa4916/C/1e141ca14e7345d895c239b13b126c01 2024-12-16T17:57:55,544 DEBUG [HFileArchiver-8 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/9edd584a765d2a226d81ae3095fa4916/C/a1cf6a349c56478aa977a54fd6a979e0 to hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/archive/data/default/TestAcidGuarantees/9edd584a765d2a226d81ae3095fa4916/C/a1cf6a349c56478aa977a54fd6a979e0 2024-12-16T17:57:55,544 DEBUG [HFileArchiver-4 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/9edd584a765d2a226d81ae3095fa4916/C/e045a9b92f6840c78251007a23e9ae48 to hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/archive/data/default/TestAcidGuarantees/9edd584a765d2a226d81ae3095fa4916/C/e045a9b92f6840c78251007a23e9ae48 2024-12-16T17:57:55,548 DEBUG [RS_CLOSE_REGION-regionserver/3609ad07831c:0-0 {event_type=M_RS_CLOSE_REGION, pid=86}] wal.WALSplitUtil(409): Wrote file=hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/9edd584a765d2a226d81ae3095fa4916/recovered.edits/482.seqid, newMaxSeqId=482, maxSeqId=1 2024-12-16T17:57:55,549 INFO [RS_CLOSE_REGION-regionserver/3609ad07831c:0-0 {event_type=M_RS_CLOSE_REGION, pid=86}] regionserver.HRegion(1922): Closed TestAcidGuarantees,,1734371848597.9edd584a765d2a226d81ae3095fa4916. 
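The archiver entries above all follow one pattern: each compacted store file under <root>/data/default/TestAcidGuarantees/<region>/<family>/ is moved to the mirrored location under <root>/archive/data/default/TestAcidGuarantees/<region>/<family>/. As a hedged illustration of that source-to-destination mapping only (a standalone sketch, not HFileArchiver's actual code; the class and method names are invented for the example):

    import org.apache.hadoop.fs.Path;

    public class ArchivePathSketch {
        // Derives the archive location that mirrors a store file's position under
        // <root>/data/, matching the "Archived from ... to ..." pairs in the log above.
        static Path toArchivePath(Path rootDir, Path storeFile) {
            String root = rootDir.toString();
            String file = storeFile.toString();
            if (!file.startsWith(root + "/data/")) {
                throw new IllegalArgumentException("store file is not under <root>/data/");
            }
            String relative = file.substring((root + "/data/").length());
            return new Path(root + "/archive/data/" + relative);
        }

        public static void main(String[] args) {
            Path root = new Path("hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4");
            Path store = new Path(root,
                "data/default/TestAcidGuarantees/9edd584a765d2a226d81ae3095fa4916/B/e3db51af415c4c82a3b564f36ed6fc52");
            // Prints the same destination the first HFileArchiver-2 entry above reports.
            System.out.println(toArchivePath(root, store));
        }
    }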
2024-12-16T17:57:55,549 DEBUG [RS_CLOSE_REGION-regionserver/3609ad07831c:0-0 {event_type=M_RS_CLOSE_REGION, pid=86}] regionserver.HRegion(1635): Region close journal for 9edd584a765d2a226d81ae3095fa4916: 2024-12-16T17:57:55,550 INFO [RS_CLOSE_REGION-regionserver/3609ad07831c:0-0 {event_type=M_RS_CLOSE_REGION, pid=86}] handler.UnassignRegionHandler(170): Closed 9edd584a765d2a226d81ae3095fa4916 2024-12-16T17:57:55,550 INFO [PEWorker-4 {}] assignment.RegionStateStore(202): pid=85 updating hbase:meta row=9edd584a765d2a226d81ae3095fa4916, regionState=CLOSED 2024-12-16T17:57:55,551 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=86, resume processing ppid=85 2024-12-16T17:57:55,551 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=86, ppid=85, state=SUCCESS; CloseRegionProcedure 9edd584a765d2a226d81ae3095fa4916, server=3609ad07831c,39733,1734371789085 in 1.4340 sec 2024-12-16T17:57:55,552 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=85, resume processing ppid=84 2024-12-16T17:57:55,552 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=85, ppid=84, state=SUCCESS; TransitRegionStateProcedure table=TestAcidGuarantees, region=9edd584a765d2a226d81ae3095fa4916, UNASSIGN in 1.4360 sec 2024-12-16T17:57:55,553 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=84, resume processing ppid=83 2024-12-16T17:57:55,553 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=84, ppid=83, state=SUCCESS; CloseTableRegionsProcedure table=TestAcidGuarantees in 1.4380 sec 2024-12-16T17:57:55,554 DEBUG [PEWorker-1 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":1,"row":"TestAcidGuarantees","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1734371875554"}]},"ts":"1734371875554"} 2024-12-16T17:57:55,555 INFO [PEWorker-1 {}] hbase.MetaTableAccessor(1655): Updated tableName=TestAcidGuarantees, state=DISABLED in hbase:meta 2024-12-16T17:57:55,563 INFO [PEWorker-1 {}] procedure.DisableTableProcedure(296): Set TestAcidGuarantees to state=DISABLED 2024-12-16T17:57:55,564 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=83, state=SUCCESS; DisableTableProcedure table=TestAcidGuarantees in 1.4790 sec 2024-12-16T17:57:56,190 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38367 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=83 2024-12-16T17:57:56,190 INFO [Time-limited test {}] client.HBaseAdmin$TableFuture(3751): Operation: DISABLE, Table Name: default:TestAcidGuarantees, procId: 83 completed 2024-12-16T17:57:56,190 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38367 {}] master.HMaster$5(2505): Client=jenkins//172.17.0.2 delete TestAcidGuarantees 2024-12-16T17:57:56,191 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38367 {}] procedure2.ProcedureExecutor(1098): Stored pid=87, state=RUNNABLE:DELETE_TABLE_PRE_OPERATION; DeleteTableProcedure table=TestAcidGuarantees 2024-12-16T17:57:56,191 DEBUG [PEWorker-5 {}] procedure.DeleteTableProcedure(103): Waiting for RIT for pid=87, state=RUNNABLE:DELETE_TABLE_PRE_OPERATION, locked=true; DeleteTableProcedure table=TestAcidGuarantees 2024-12-16T17:57:56,192 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38367 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=87 2024-12-16T17:57:56,192 DEBUG [PEWorker-5 {}] procedure.DeleteTableProcedure(115): Deleting regions from filesystem for 
pid=87, state=RUNNABLE:DELETE_TABLE_CLEAR_FS_LAYOUT, locked=true; DeleteTableProcedure table=TestAcidGuarantees 2024-12-16T17:57:56,193 DEBUG [HFileArchiver-6 {}] backup.HFileArchiver(133): ARCHIVING hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/9edd584a765d2a226d81ae3095fa4916 2024-12-16T17:57:56,195 DEBUG [HFileArchiver-6 {}] backup.HFileArchiver(161): Archiving [FileablePath, hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/9edd584a765d2a226d81ae3095fa4916/A, FileablePath, hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/9edd584a765d2a226d81ae3095fa4916/B, FileablePath, hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/9edd584a765d2a226d81ae3095fa4916/C, FileablePath, hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/9edd584a765d2a226d81ae3095fa4916/recovered.edits] 2024-12-16T17:57:56,198 DEBUG [HFileArchiver-2 {}] backup.HFileArchiver(620): Archived from FileablePath, hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/9edd584a765d2a226d81ae3095fa4916/A/5c913f96bc0d415fb4b54242d131f8f0 to hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/archive/data/default/TestAcidGuarantees/9edd584a765d2a226d81ae3095fa4916/A/5c913f96bc0d415fb4b54242d131f8f0 2024-12-16T17:57:56,198 DEBUG [HFileArchiver-7 {}] backup.HFileArchiver(620): Archived from FileablePath, hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/9edd584a765d2a226d81ae3095fa4916/A/883761bb055b4a8c9a70bd6ebcee75df to hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/archive/data/default/TestAcidGuarantees/9edd584a765d2a226d81ae3095fa4916/A/883761bb055b4a8c9a70bd6ebcee75df 2024-12-16T17:57:56,199 DEBUG [HFileArchiver-3 {}] backup.HFileArchiver(620): Archived from FileablePath, hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/9edd584a765d2a226d81ae3095fa4916/A/b4ca799a0a364707915c8e7966351c54 to hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/archive/data/default/TestAcidGuarantees/9edd584a765d2a226d81ae3095fa4916/A/b4ca799a0a364707915c8e7966351c54 2024-12-16T17:57:56,201 DEBUG [HFileArchiver-5 {}] backup.HFileArchiver(620): Archived from FileablePath, hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/9edd584a765d2a226d81ae3095fa4916/B/74196722915b40318e6732f85510c3ae to hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/archive/data/default/TestAcidGuarantees/9edd584a765d2a226d81ae3095fa4916/B/74196722915b40318e6732f85510c3ae 2024-12-16T17:57:56,201 DEBUG [HFileArchiver-8 {}] backup.HFileArchiver(620): Archived from FileablePath, hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/9edd584a765d2a226d81ae3095fa4916/B/75c20fd7994d4b81b097a07a33c34377 to hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/archive/data/default/TestAcidGuarantees/9edd584a765d2a226d81ae3095fa4916/B/75c20fd7994d4b81b097a07a33c34377 2024-12-16T17:57:56,201 
DEBUG [HFileArchiver-1 {}] backup.HFileArchiver(620): Archived from FileablePath, hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/9edd584a765d2a226d81ae3095fa4916/B/7f2a6982a3724e3fa43002725e731627 to hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/archive/data/default/TestAcidGuarantees/9edd584a765d2a226d81ae3095fa4916/B/7f2a6982a3724e3fa43002725e731627 2024-12-16T17:57:56,203 DEBUG [HFileArchiver-4 {}] backup.HFileArchiver(620): Archived from FileablePath, hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/9edd584a765d2a226d81ae3095fa4916/C/78fd7884c4b249c2a7e1547cc0d5e3b6 to hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/archive/data/default/TestAcidGuarantees/9edd584a765d2a226d81ae3095fa4916/C/78fd7884c4b249c2a7e1547cc0d5e3b6 2024-12-16T17:57:56,203 DEBUG [HFileArchiver-7 {}] backup.HFileArchiver(620): Archived from FileablePath, hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/9edd584a765d2a226d81ae3095fa4916/C/dcee915ab09640afbd67eb3bfc137b9e to hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/archive/data/default/TestAcidGuarantees/9edd584a765d2a226d81ae3095fa4916/C/dcee915ab09640afbd67eb3bfc137b9e 2024-12-16T17:57:56,203 DEBUG [HFileArchiver-2 {}] backup.HFileArchiver(620): Archived from FileablePath, hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/9edd584a765d2a226d81ae3095fa4916/C/abd65d5b4c884366b611ceabc6447dad to hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/archive/data/default/TestAcidGuarantees/9edd584a765d2a226d81ae3095fa4916/C/abd65d5b4c884366b611ceabc6447dad 2024-12-16T17:57:56,204 DEBUG [HFileArchiver-3 {}] backup.HFileArchiver(620): Archived from FileablePath, hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/9edd584a765d2a226d81ae3095fa4916/recovered.edits/482.seqid to hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/archive/data/default/TestAcidGuarantees/9edd584a765d2a226d81ae3095fa4916/recovered.edits/482.seqid 2024-12-16T17:57:56,205 DEBUG [HFileArchiver-6 {}] backup.HFileArchiver(634): Deleted hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/9edd584a765d2a226d81ae3095fa4916 2024-12-16T17:57:56,205 DEBUG [PEWorker-5 {}] procedure.DeleteTableProcedure(313): Archived TestAcidGuarantees regions 2024-12-16T17:57:56,206 DEBUG [PEWorker-5 {}] procedure.DeleteTableProcedure(120): Deleting regions from META for pid=87, state=RUNNABLE:DELETE_TABLE_REMOVE_FROM_META, locked=true; DeleteTableProcedure table=TestAcidGuarantees 2024-12-16T17:57:56,210 WARN [PEWorker-5 {}] procedure.DeleteTableProcedure(371): Deleting some vestigial 1 rows of TestAcidGuarantees from hbase:meta 2024-12-16T17:57:56,211 DEBUG [PEWorker-5 {}] procedure.DeleteTableProcedure(408): Removing 'TestAcidGuarantees' descriptor. 
2024-12-16T17:57:56,213 DEBUG [PEWorker-5 {}] procedure.DeleteTableProcedure(126): Deleting assignment state for pid=87, state=RUNNABLE:DELETE_TABLE_UNASSIGN_REGIONS, locked=true; DeleteTableProcedure table=TestAcidGuarantees 2024-12-16T17:57:56,213 DEBUG [PEWorker-5 {}] procedure.DeleteTableProcedure(398): Removing 'TestAcidGuarantees' from region states. 2024-12-16T17:57:56,213 DEBUG [PEWorker-5 {}] hbase.MetaTableAccessor(2113): Delete {"totalColumns":1,"row":"TestAcidGuarantees,,1734371848597.9edd584a765d2a226d81ae3095fa4916.","families":{"info":[{"qualifier":"","vlen":0,"tag":[],"timestamp":"1734371876213"}]},"ts":"9223372036854775807"} 2024-12-16T17:57:56,214 INFO [PEWorker-5 {}] hbase.MetaTableAccessor(1808): Deleted 1 regions from META 2024-12-16T17:57:56,214 DEBUG [PEWorker-5 {}] hbase.MetaTableAccessor(1809): Deleted regions: [{ENCODED => 9edd584a765d2a226d81ae3095fa4916, NAME => 'TestAcidGuarantees,,1734371848597.9edd584a765d2a226d81ae3095fa4916.', STARTKEY => '', ENDKEY => ''}] 2024-12-16T17:57:56,214 DEBUG [PEWorker-5 {}] procedure.DeleteTableProcedure(402): Marking 'TestAcidGuarantees' as deleted. 2024-12-16T17:57:56,214 DEBUG [PEWorker-5 {}] hbase.MetaTableAccessor(2113): Delete {"totalColumns":1,"row":"TestAcidGuarantees","families":{"table":[{"qualifier":"state","vlen":0,"tag":[],"timestamp":"1734371876214"}]},"ts":"9223372036854775807"} 2024-12-16T17:57:56,216 INFO [PEWorker-5 {}] hbase.MetaTableAccessor(1678): Deleted table TestAcidGuarantees state from META 2024-12-16T17:57:56,230 DEBUG [PEWorker-5 {}] procedure.DeleteTableProcedure(133): Finished pid=87, state=RUNNABLE:DELETE_TABLE_POST_OPERATION, locked=true; DeleteTableProcedure table=TestAcidGuarantees 2024-12-16T17:57:56,231 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=87, state=SUCCESS; DeleteTableProcedure table=TestAcidGuarantees in 40 msec 2024-12-16T17:57:56,292 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38367 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=87 2024-12-16T17:57:56,293 INFO [Time-limited test {}] client.HBaseAdmin$TableFuture(3751): Operation: DELETE, Table Name: default:TestAcidGuarantees, procId: 87 completed 2024-12-16T17:57:56,304 INFO [Time-limited test {}] hbase.ResourceChecker(175): after: TestAcidGuaranteesWithAdaptivePolicy#testGetAtomicity Thread=245 (was 248), OpenFileDescriptor=452 (was 464), MaxFileDescriptor=1048576 (was 1048576), SystemLoadAverage=539 (was 451) - SystemLoadAverage LEAK? -, ProcessCount=12 (was 11) - ProcessCount LEAK? -, AvailableMemoryMB=3191 (was 3329) 2024-12-16T17:57:56,312 INFO [Time-limited test {}] hbase.ResourceChecker(147): before: TestAcidGuaranteesWithAdaptivePolicy#testMobScanAtomicity Thread=245, OpenFileDescriptor=452, MaxFileDescriptor=1048576, SystemLoadAverage=539, ProcessCount=11, AvailableMemoryMB=3191 2024-12-16T17:57:56,313 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38367 {}] util.TableDescriptorChecker(321): MEMSTORE_FLUSHSIZE for table descriptor or "hbase.hregion.memstore.flush.size" (131072) is too small, which might cause very frequent flushing. 
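Both client operations logged just above ("Operation: DISABLE ... procId: 83 completed" and "Operation: DELETE ... procId: 87 completed") originate from HBaseAdmin calls; the master turns them into the DisableTableProcedure and DeleteTableProcedure whose steps fill the preceding entries. A minimal, hedged sketch of the equivalent standalone client calls follows; the connection setup is an assumption for the example (the test itself runs against a mini-cluster), and only the Admin methods shown are the actual public API:

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;

    public class DropTableSketch {
        public static void main(String[] args) throws Exception {
            Configuration conf = HBaseConfiguration.create(); // assumes hbase-site.xml on the classpath
            try (Connection conn = ConnectionFactory.createConnection(conf);
                 Admin admin = conn.getAdmin()) {
                TableName table = TableName.valueOf("TestAcidGuarantees");
                if (admin.isTableEnabled(table)) {
                    admin.disableTable(table); // corresponds to the DisableTableProcedure above
                }
                admin.deleteTable(table);      // corresponds to the DeleteTableProcedure (archive + meta cleanup)
            }
        }
    }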
2024-12-16T17:57:56,313 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38367 {}] master.HMaster$4(2389): Client=jenkins//172.17.0.2 create 'TestAcidGuarantees', {TABLE_ATTRIBUTES => {METADATA => {'hbase.hregion.compacting.memstore.type' => 'ADAPTIVE'}}}, {NAME => 'A', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'B', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'C', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-12-16T17:57:56,314 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38367 {}] procedure2.ProcedureExecutor(1098): Stored pid=88, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION; CreateTableProcedure table=TestAcidGuarantees 2024-12-16T17:57:56,314 INFO [PEWorker-4 {}] procedure.CreateTableProcedure(89): pid=88, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, locked=true; CreateTableProcedure table=TestAcidGuarantees execute state=CREATE_TABLE_PRE_OPERATION 2024-12-16T17:57:56,315 DEBUG [PEWorker-4 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-16T17:57:56,315 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38367 {}] master.MasterRpcServices(713): Client=jenkins//172.17.0.2 procedure request for creating table: namespace: "default" qualifier: "TestAcidGuarantees" procId is: 88 2024-12-16T17:57:56,315 INFO [PEWorker-4 {}] procedure.CreateTableProcedure(89): pid=88, state=RUNNABLE:CREATE_TABLE_WRITE_FS_LAYOUT, locked=true; CreateTableProcedure table=TestAcidGuarantees execute state=CREATE_TABLE_WRITE_FS_LAYOUT 2024-12-16T17:57:56,315 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38367 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=88 2024-12-16T17:57:56,320 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41817 is added to blk_1073742180_1356 (size=963) 2024-12-16T17:57:56,416 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38367 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=88 2024-12-16T17:57:56,617 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38367 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=88 2024-12-16T17:57:56,724 INFO [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(7106): creating {ENCODED => 5e6b04d28ec6af2428036942def2f402, NAME => 'TestAcidGuarantees,,1734371876313.5e6b04d28ec6af2428036942def2f402.', STARTKEY => '', ENDKEY => ''}, tableDescriptor='TestAcidGuarantees', {TABLE_ATTRIBUTES => {METADATA => {'hbase.hregion.compacting.memstore.type' => 'ADAPTIVE', 'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'A', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', 
KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'B', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'C', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, regionDir=hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4 2024-12-16T17:57:56,734 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41817 is added to blk_1073742181_1357 (size=53) 2024-12-16T17:57:56,918 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38367 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=88 2024-12-16T17:57:57,135 DEBUG [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(894): Instantiated TestAcidGuarantees,,1734371876313.5e6b04d28ec6af2428036942def2f402.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-16T17:57:57,135 DEBUG [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(1681): Closing 5e6b04d28ec6af2428036942def2f402, disabling compactions & flushes 2024-12-16T17:57:57,135 INFO [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(1703): Closing region TestAcidGuarantees,,1734371876313.5e6b04d28ec6af2428036942def2f402. 2024-12-16T17:57:57,135 DEBUG [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(1724): Waiting without time limit for close lock on TestAcidGuarantees,,1734371876313.5e6b04d28ec6af2428036942def2f402. 2024-12-16T17:57:57,135 DEBUG [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(1791): Acquired close lock on TestAcidGuarantees,,1734371876313.5e6b04d28ec6af2428036942def2f402. after waiting 0 ms 2024-12-16T17:57:57,135 DEBUG [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(1801): Updates disabled for region TestAcidGuarantees,,1734371876313.5e6b04d28ec6af2428036942def2f402. 2024-12-16T17:57:57,135 INFO [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(1922): Closed TestAcidGuarantees,,1734371876313.5e6b04d28ec6af2428036942def2f402. 
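The descriptor echoed above by HMaster$4(2389) and again by the RegionOpenAndInit worker boils down to a table-level 'hbase.hregion.compacting.memstore.type' => 'ADAPTIVE' attribute plus three column families A, B and C keeping one version; the remaining family attributes shown (BLOOMFILTER, TTL, BLOCKSIZE, and so on) appear to be the stock defaults. A hedged sketch of how such a descriptor could be built with the public client API follows; it is not the test's own setup code, and the connection boilerplate is assumed:

    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;
    import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
    import org.apache.hadoop.hbase.util.Bytes;

    public class CreateTableSketch {
        public static void main(String[] args) throws Exception {
            try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create());
                 Admin admin = conn.getAdmin()) {
                TableDescriptorBuilder table =
                    TableDescriptorBuilder.newBuilder(TableName.valueOf("TestAcidGuarantees"))
                        // table-level attribute matching METADATA in the create statement above
                        .setValue("hbase.hregion.compacting.memstore.type", "ADAPTIVE");
                for (String family : new String[] {"A", "B", "C"}) {
                    table.setColumnFamily(
                        ColumnFamilyDescriptorBuilder.newBuilder(Bytes.toBytes(family))
                            .setMaxVersions(1) // VERSIONS => '1' in the descriptor
                            .build());
                }
                admin.createTable(table.build()); // drives the CreateTableProcedure (pid=88) above
            }
        }
    }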
2024-12-16T17:57:57,135 DEBUG [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(1635): Region close journal for 5e6b04d28ec6af2428036942def2f402: 2024-12-16T17:57:57,137 INFO [PEWorker-4 {}] procedure.CreateTableProcedure(89): pid=88, state=RUNNABLE:CREATE_TABLE_ADD_TO_META, locked=true; CreateTableProcedure table=TestAcidGuarantees execute state=CREATE_TABLE_ADD_TO_META 2024-12-16T17:57:57,137 DEBUG [PEWorker-4 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":2,"row":"TestAcidGuarantees,,1734371876313.5e6b04d28ec6af2428036942def2f402.","families":{"info":[{"qualifier":"regioninfo","vlen":52,"tag":[],"timestamp":"1734371877137"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1734371877137"}]},"ts":"1734371877137"} 2024-12-16T17:57:57,139 INFO [PEWorker-4 {}] hbase.MetaTableAccessor(1516): Added 1 regions to meta. 2024-12-16T17:57:57,140 INFO [PEWorker-4 {}] procedure.CreateTableProcedure(89): pid=88, state=RUNNABLE:CREATE_TABLE_ASSIGN_REGIONS, locked=true; CreateTableProcedure table=TestAcidGuarantees execute state=CREATE_TABLE_ASSIGN_REGIONS 2024-12-16T17:57:57,140 DEBUG [PEWorker-4 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":1,"row":"TestAcidGuarantees","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1734371877140"}]},"ts":"1734371877140"} 2024-12-16T17:57:57,141 INFO [PEWorker-4 {}] hbase.MetaTableAccessor(1655): Updated tableName=TestAcidGuarantees, state=ENABLING in hbase:meta 2024-12-16T17:57:57,248 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=89, ppid=88, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE; TransitRegionStateProcedure table=TestAcidGuarantees, region=5e6b04d28ec6af2428036942def2f402, ASSIGN}] 2024-12-16T17:57:57,251 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=89, ppid=88, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE; TransitRegionStateProcedure table=TestAcidGuarantees, region=5e6b04d28ec6af2428036942def2f402, ASSIGN 2024-12-16T17:57:57,253 INFO [PEWorker-2 {}] assignment.TransitRegionStateProcedure(264): Starting pid=89, ppid=88, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, locked=true; TransitRegionStateProcedure table=TestAcidGuarantees, region=5e6b04d28ec6af2428036942def2f402, ASSIGN; state=OFFLINE, location=3609ad07831c,39733,1734371789085; forceNewPlan=false, retain=false 2024-12-16T17:57:57,404 INFO [PEWorker-3 {}] assignment.RegionStateStore(202): pid=89 updating hbase:meta row=5e6b04d28ec6af2428036942def2f402, regionState=OPENING, regionLocation=3609ad07831c,39733,1734371789085 2024-12-16T17:57:57,405 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=90, ppid=89, state=RUNNABLE; OpenRegionProcedure 5e6b04d28ec6af2428036942def2f402, server=3609ad07831c,39733,1734371789085}] 2024-12-16T17:57:57,419 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38367 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=88 2024-12-16T17:57:57,535 DEBUG [FsDatasetAsyncDiskServiceFixer {}] hbase.HBaseTestingUtility$FsDatasetAsyncDiskServiceFixer(620): NoSuchFieldException: threadGroup; It might because your Hadoop version > 3.2.3 or 3.3.4, See HBASE-27595 for details. 
2024-12-16T17:57:57,557 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 3609ad07831c,39733,1734371789085 2024-12-16T17:57:57,562 INFO [RS_OPEN_REGION-regionserver/3609ad07831c:0-0 {event_type=M_RS_OPEN_REGION, pid=90}] handler.AssignRegionHandler(135): Open TestAcidGuarantees,,1734371876313.5e6b04d28ec6af2428036942def2f402. 2024-12-16T17:57:57,562 DEBUG [RS_OPEN_REGION-regionserver/3609ad07831c:0-0 {event_type=M_RS_OPEN_REGION, pid=90}] regionserver.HRegion(7285): Opening region: {ENCODED => 5e6b04d28ec6af2428036942def2f402, NAME => 'TestAcidGuarantees,,1734371876313.5e6b04d28ec6af2428036942def2f402.', STARTKEY => '', ENDKEY => ''} 2024-12-16T17:57:57,563 DEBUG [RS_OPEN_REGION-regionserver/3609ad07831c:0-0 {event_type=M_RS_OPEN_REGION, pid=90}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table TestAcidGuarantees 5e6b04d28ec6af2428036942def2f402 2024-12-16T17:57:57,563 DEBUG [RS_OPEN_REGION-regionserver/3609ad07831c:0-0 {event_type=M_RS_OPEN_REGION, pid=90}] regionserver.HRegion(894): Instantiated TestAcidGuarantees,,1734371876313.5e6b04d28ec6af2428036942def2f402.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-16T17:57:57,563 DEBUG [RS_OPEN_REGION-regionserver/3609ad07831c:0-0 {event_type=M_RS_OPEN_REGION, pid=90}] regionserver.HRegion(7327): checking encryption for 5e6b04d28ec6af2428036942def2f402 2024-12-16T17:57:57,563 DEBUG [RS_OPEN_REGION-regionserver/3609ad07831c:0-0 {event_type=M_RS_OPEN_REGION, pid=90}] regionserver.HRegion(7330): checking classloading for 5e6b04d28ec6af2428036942def2f402 2024-12-16T17:57:57,565 INFO [StoreOpener-5e6b04d28ec6af2428036942def2f402-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family A of region 5e6b04d28ec6af2428036942def2f402 2024-12-16T17:57:57,567 INFO [StoreOpener-5e6b04d28ec6af2428036942def2f402-1 {}] regionserver.CompactingMemStore(122): Store=A, in-memory flush size threshold=2.00 MB, immutable segments index type=CHUNK_MAP, compactor=ADAPTIVE, pipelineThreshold=2, compactionCellMax=10 2024-12-16T17:57:57,568 INFO [StoreOpener-5e6b04d28ec6af2428036942def2f402-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 5e6b04d28ec6af2428036942def2f402 columnFamilyName A 2024-12-16T17:57:57,568 DEBUG [StoreOpener-5e6b04d28ec6af2428036942def2f402-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-16T17:57:57,569 INFO [StoreOpener-5e6b04d28ec6af2428036942def2f402-1 {}] regionserver.HStore(327): Store=5e6b04d28ec6af2428036942def2f402/A, memstore type=CompactingMemStore, 
storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-16T17:57:57,569 INFO [StoreOpener-5e6b04d28ec6af2428036942def2f402-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family B of region 5e6b04d28ec6af2428036942def2f402 2024-12-16T17:57:57,571 INFO [StoreOpener-5e6b04d28ec6af2428036942def2f402-1 {}] regionserver.CompactingMemStore(122): Store=B, in-memory flush size threshold=2.00 MB, immutable segments index type=CHUNK_MAP, compactor=ADAPTIVE, pipelineThreshold=2, compactionCellMax=10 2024-12-16T17:57:57,571 INFO [StoreOpener-5e6b04d28ec6af2428036942def2f402-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 5e6b04d28ec6af2428036942def2f402 columnFamilyName B 2024-12-16T17:57:57,571 DEBUG [StoreOpener-5e6b04d28ec6af2428036942def2f402-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-16T17:57:57,571 INFO [StoreOpener-5e6b04d28ec6af2428036942def2f402-1 {}] regionserver.HStore(327): Store=5e6b04d28ec6af2428036942def2f402/B, memstore type=CompactingMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-16T17:57:57,571 INFO [StoreOpener-5e6b04d28ec6af2428036942def2f402-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family C of region 5e6b04d28ec6af2428036942def2f402 2024-12-16T17:57:57,572 INFO [StoreOpener-5e6b04d28ec6af2428036942def2f402-1 {}] regionserver.CompactingMemStore(122): Store=C, in-memory flush size threshold=2.00 MB, immutable segments index type=CHUNK_MAP, compactor=ADAPTIVE, pipelineThreshold=2, compactionCellMax=10 2024-12-16T17:57:57,573 INFO [StoreOpener-5e6b04d28ec6af2428036942def2f402-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 5e6b04d28ec6af2428036942def2f402 columnFamilyName C 2024-12-16T17:57:57,573 DEBUG 
[StoreOpener-5e6b04d28ec6af2428036942def2f402-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-16T17:57:57,573 INFO [StoreOpener-5e6b04d28ec6af2428036942def2f402-1 {}] regionserver.HStore(327): Store=5e6b04d28ec6af2428036942def2f402/C, memstore type=CompactingMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-16T17:57:57,573 INFO [RS_OPEN_REGION-regionserver/3609ad07831c:0-0 {event_type=M_RS_OPEN_REGION, pid=90}] regionserver.HRegion(1178): Setting FlushNonSloppyStoresFirstPolicy for the region=TestAcidGuarantees,,1734371876313.5e6b04d28ec6af2428036942def2f402. 2024-12-16T17:57:57,574 DEBUG [RS_OPEN_REGION-regionserver/3609ad07831c:0-0 {event_type=M_RS_OPEN_REGION, pid=90}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/5e6b04d28ec6af2428036942def2f402 2024-12-16T17:57:57,574 DEBUG [RS_OPEN_REGION-regionserver/3609ad07831c:0-0 {event_type=M_RS_OPEN_REGION, pid=90}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/5e6b04d28ec6af2428036942def2f402 2024-12-16T17:57:57,575 DEBUG [RS_OPEN_REGION-regionserver/3609ad07831c:0-0 {event_type=M_RS_OPEN_REGION, pid=90}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table TestAcidGuarantees descriptor;using region.getMemStoreFlushHeapSize/# of families (16.0 M)) instead. 2024-12-16T17:57:57,576 DEBUG [RS_OPEN_REGION-regionserver/3609ad07831c:0-0 {event_type=M_RS_OPEN_REGION, pid=90}] regionserver.HRegion(1085): writing seq id for 5e6b04d28ec6af2428036942def2f402 2024-12-16T17:57:57,578 DEBUG [RS_OPEN_REGION-regionserver/3609ad07831c:0-0 {event_type=M_RS_OPEN_REGION, pid=90}] wal.WALSplitUtil(409): Wrote file=hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/5e6b04d28ec6af2428036942def2f402/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-12-16T17:57:57,578 INFO [RS_OPEN_REGION-regionserver/3609ad07831c:0-0 {event_type=M_RS_OPEN_REGION, pid=90}] regionserver.HRegion(1102): Opened 5e6b04d28ec6af2428036942def2f402; next sequenceid=2; ConstantSizeRegionSplitPolicy{desiredMaxFileSize=64422920, jitterRate=-0.04002368450164795}, FlushLargeStoresPolicy{flushSizeLowerBound=16777216} 2024-12-16T17:57:57,578 DEBUG [RS_OPEN_REGION-regionserver/3609ad07831c:0-0 {event_type=M_RS_OPEN_REGION, pid=90}] regionserver.HRegion(1001): Region open journal for 5e6b04d28ec6af2428036942def2f402: 2024-12-16T17:57:57,579 INFO [RS_OPEN_REGION-regionserver/3609ad07831c:0-0 {event_type=M_RS_OPEN_REGION, pid=90}] regionserver.HRegionServer(2601): Post open deploy tasks for TestAcidGuarantees,,1734371876313.5e6b04d28ec6af2428036942def2f402., pid=90, masterSystemTime=1734371877557 2024-12-16T17:57:57,580 DEBUG [RS_OPEN_REGION-regionserver/3609ad07831c:0-0 {event_type=M_RS_OPEN_REGION, pid=90}] regionserver.HRegionServer(2628): Finished post open deploy task for TestAcidGuarantees,,1734371876313.5e6b04d28ec6af2428036942def2f402. 
2024-12-16T17:57:57,580 INFO [RS_OPEN_REGION-regionserver/3609ad07831c:0-0 {event_type=M_RS_OPEN_REGION, pid=90}] handler.AssignRegionHandler(164): Opened TestAcidGuarantees,,1734371876313.5e6b04d28ec6af2428036942def2f402. 2024-12-16T17:57:57,580 INFO [PEWorker-5 {}] assignment.RegionStateStore(202): pid=89 updating hbase:meta row=5e6b04d28ec6af2428036942def2f402, regionState=OPEN, openSeqNum=2, regionLocation=3609ad07831c,39733,1734371789085 2024-12-16T17:57:57,582 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=90, resume processing ppid=89 2024-12-16T17:57:57,582 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=90, ppid=89, state=SUCCESS; OpenRegionProcedure 5e6b04d28ec6af2428036942def2f402, server=3609ad07831c,39733,1734371789085 in 176 msec 2024-12-16T17:57:57,583 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=89, resume processing ppid=88 2024-12-16T17:57:57,583 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=89, ppid=88, state=SUCCESS; TransitRegionStateProcedure table=TestAcidGuarantees, region=5e6b04d28ec6af2428036942def2f402, ASSIGN in 334 msec 2024-12-16T17:57:57,583 INFO [PEWorker-2 {}] procedure.CreateTableProcedure(89): pid=88, state=RUNNABLE:CREATE_TABLE_UPDATE_DESC_CACHE, locked=true; CreateTableProcedure table=TestAcidGuarantees execute state=CREATE_TABLE_UPDATE_DESC_CACHE 2024-12-16T17:57:57,584 DEBUG [PEWorker-2 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":1,"row":"TestAcidGuarantees","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1734371877583"}]},"ts":"1734371877583"} 2024-12-16T17:57:57,584 INFO [PEWorker-2 {}] hbase.MetaTableAccessor(1655): Updated tableName=TestAcidGuarantees, state=ENABLED in hbase:meta 2024-12-16T17:57:57,606 INFO [PEWorker-2 {}] procedure.CreateTableProcedure(89): pid=88, state=RUNNABLE:CREATE_TABLE_POST_OPERATION, locked=true; CreateTableProcedure table=TestAcidGuarantees execute state=CREATE_TABLE_POST_OPERATION 2024-12-16T17:57:57,607 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=88, state=SUCCESS; CreateTableProcedure table=TestAcidGuarantees in 1.2930 sec 2024-12-16T17:57:58,420 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38367 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=88 2024-12-16T17:57:58,421 INFO [Time-limited test {}] client.HBaseAdmin$TableFuture(3751): Operation: CREATE, Table Name: default:TestAcidGuarantees, procId: 88 completed 2024-12-16T17:57:58,424 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x7c663007 to 127.0.0.1:49190 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@209f566b 2024-12-16T17:57:58,437 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@545745e2, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-16T17:57:58,439 DEBUG [Time-limited test {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-16T17:57:58,440 INFO [RS-EventLoopGroup-3-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:47342, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-16T17:57:58,441 DEBUG [Time-limited test {}] 
ipc.RpcConnection(159): Using SIMPLE authentication for service=MasterService, sasl=false 2024-12-16T17:57:58,443 INFO [RS-EventLoopGroup-1-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:38384, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=MasterService 2024-12-16T17:57:58,444 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38367 {}] util.TableDescriptorChecker(321): MEMSTORE_FLUSHSIZE for table descriptor or "hbase.hregion.memstore.flush.size" (131072) is too small, which might cause very frequent flushing. 2024-12-16T17:57:58,445 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38367 {}] master.HMaster$14(2798): Client=jenkins//172.17.0.2 modify table TestAcidGuarantees from 'TestAcidGuarantees', {TABLE_ATTRIBUTES => {METADATA => {'hbase.hregion.compacting.memstore.type' => 'ADAPTIVE', 'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'A', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'B', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'C', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} to 'TestAcidGuarantees', {TABLE_ATTRIBUTES => {METADATA => {'hbase.hregion.compacting.memstore.type' => 'ADAPTIVE', 'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'A', INDEX_BLOCK_ENCODING => 'NONE', MOB_THRESHOLD => '4', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', IS_MOB => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'B', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'C', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-12-16T17:57:58,446 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38367 {}] procedure2.ProcedureExecutor(1098): Stored pid=91, state=RUNNABLE:MODIFY_TABLE_PREPARE; ModifyTableProcedure table=TestAcidGuarantees 2024-12-16T17:57:58,462 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41817 is added to blk_1073742182_1358 (size=999) 2024-12-16T17:57:58,463 DEBUG [PEWorker-3 {}] util.FSTableDescriptors(519): Deleted 
hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/.tabledesc/.tableinfo.0000000001.963 2024-12-16T17:57:58,463 INFO [PEWorker-3 {}] util.FSTableDescriptors(297): Updated tableinfo=hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/.tabledesc/.tableinfo.0000000002.999 2024-12-16T17:57:58,465 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=92, ppid=91, state=RUNNABLE:REOPEN_TABLE_REGIONS_GET_REGIONS; ReopenTableRegionsProcedure table=TestAcidGuarantees}] 2024-12-16T17:57:58,466 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=93, ppid=92, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE; TransitRegionStateProcedure table=TestAcidGuarantees, region=5e6b04d28ec6af2428036942def2f402, REOPEN/MOVE}] 2024-12-16T17:57:58,467 INFO [PEWorker-5 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=93, ppid=92, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE; TransitRegionStateProcedure table=TestAcidGuarantees, region=5e6b04d28ec6af2428036942def2f402, REOPEN/MOVE 2024-12-16T17:57:58,467 INFO [PEWorker-5 {}] assignment.RegionStateStore(202): pid=93 updating hbase:meta row=5e6b04d28ec6af2428036942def2f402, regionState=CLOSING, regionLocation=3609ad07831c,39733,1734371789085 2024-12-16T17:57:58,468 DEBUG [PEWorker-5 {}] assignment.TransitRegionStateProcedure(338): Close region: isSplit: false: evictOnSplit: true: evictOnClose: false 2024-12-16T17:57:58,468 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=94, ppid=93, state=RUNNABLE; CloseRegionProcedure 5e6b04d28ec6af2428036942def2f402, server=3609ad07831c,39733,1734371789085}] 2024-12-16T17:57:58,619 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 3609ad07831c,39733,1734371789085 2024-12-16T17:57:58,619 INFO [RS_CLOSE_REGION-regionserver/3609ad07831c:0-0 {event_type=M_RS_CLOSE_REGION, pid=94}] handler.UnassignRegionHandler(124): Close 5e6b04d28ec6af2428036942def2f402 2024-12-16T17:57:58,620 DEBUG [RS_CLOSE_REGION-regionserver/3609ad07831c:0-0 {event_type=M_RS_CLOSE_REGION, pid=94}] handler.UnassignRegionHandler(138): Unassign region: split region: false: evictCache: false 2024-12-16T17:57:58,620 DEBUG [RS_CLOSE_REGION-regionserver/3609ad07831c:0-0 {event_type=M_RS_CLOSE_REGION, pid=94}] regionserver.HRegion(1681): Closing 5e6b04d28ec6af2428036942def2f402, disabling compactions & flushes 2024-12-16T17:57:58,620 INFO [RS_CLOSE_REGION-regionserver/3609ad07831c:0-0 {event_type=M_RS_CLOSE_REGION, pid=94}] regionserver.HRegion(1703): Closing region TestAcidGuarantees,,1734371876313.5e6b04d28ec6af2428036942def2f402. 2024-12-16T17:57:58,620 DEBUG [RS_CLOSE_REGION-regionserver/3609ad07831c:0-0 {event_type=M_RS_CLOSE_REGION, pid=94}] regionserver.HRegion(1724): Waiting without time limit for close lock on TestAcidGuarantees,,1734371876313.5e6b04d28ec6af2428036942def2f402. 2024-12-16T17:57:58,620 DEBUG [RS_CLOSE_REGION-regionserver/3609ad07831c:0-0 {event_type=M_RS_CLOSE_REGION, pid=94}] regionserver.HRegion(1791): Acquired close lock on TestAcidGuarantees,,1734371876313.5e6b04d28ec6af2428036942def2f402. after waiting 0 ms 2024-12-16T17:57:58,620 DEBUG [RS_CLOSE_REGION-regionserver/3609ad07831c:0-0 {event_type=M_RS_CLOSE_REGION, pid=94}] regionserver.HRegion(1801): Updates disabled for region TestAcidGuarantees,,1734371876313.5e6b04d28ec6af2428036942def2f402. 
2024-12-16T17:57:58,624 DEBUG [RS_CLOSE_REGION-regionserver/3609ad07831c:0-0 {event_type=M_RS_CLOSE_REGION, pid=94}] wal.WALSplitUtil(409): Wrote file=hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/5e6b04d28ec6af2428036942def2f402/recovered.edits/4.seqid, newMaxSeqId=4, maxSeqId=1 2024-12-16T17:57:58,625 INFO [RS_CLOSE_REGION-regionserver/3609ad07831c:0-0 {event_type=M_RS_CLOSE_REGION, pid=94}] regionserver.HRegion(1922): Closed TestAcidGuarantees,,1734371876313.5e6b04d28ec6af2428036942def2f402. 2024-12-16T17:57:58,625 DEBUG [RS_CLOSE_REGION-regionserver/3609ad07831c:0-0 {event_type=M_RS_CLOSE_REGION, pid=94}] regionserver.HRegion(1635): Region close journal for 5e6b04d28ec6af2428036942def2f402: 2024-12-16T17:57:58,625 WARN [RS_CLOSE_REGION-regionserver/3609ad07831c:0-0 {event_type=M_RS_CLOSE_REGION, pid=94}] regionserver.HRegionServer(3786): Not adding moved region record: 5e6b04d28ec6af2428036942def2f402 to self. 2024-12-16T17:57:58,627 INFO [RS_CLOSE_REGION-regionserver/3609ad07831c:0-0 {event_type=M_RS_CLOSE_REGION, pid=94}] handler.UnassignRegionHandler(170): Closed 5e6b04d28ec6af2428036942def2f402 2024-12-16T17:57:58,628 INFO [PEWorker-2 {}] assignment.RegionStateStore(202): pid=93 updating hbase:meta row=5e6b04d28ec6af2428036942def2f402, regionState=CLOSED 2024-12-16T17:57:58,631 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=94, resume processing ppid=93 2024-12-16T17:57:58,631 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=94, ppid=93, state=SUCCESS; CloseRegionProcedure 5e6b04d28ec6af2428036942def2f402, server=3609ad07831c,39733,1734371789085 in 161 msec 2024-12-16T17:57:58,632 INFO [PEWorker-3 {}] assignment.TransitRegionStateProcedure(264): Starting pid=93, ppid=92, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, locked=true; TransitRegionStateProcedure table=TestAcidGuarantees, region=5e6b04d28ec6af2428036942def2f402, REOPEN/MOVE; state=CLOSED, location=3609ad07831c,39733,1734371789085; forceNewPlan=false, retain=true 2024-12-16T17:57:58,762 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(153): Removing adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_default_table_TestAcidGuarantees 2024-12-16T17:57:58,782 INFO [PEWorker-1 {}] assignment.RegionStateStore(202): pid=93 updating hbase:meta row=5e6b04d28ec6af2428036942def2f402, regionState=OPENING, regionLocation=3609ad07831c,39733,1734371789085 2024-12-16T17:57:58,785 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=95, ppid=93, state=RUNNABLE; OpenRegionProcedure 5e6b04d28ec6af2428036942def2f402, server=3609ad07831c,39733,1734371789085}] 2024-12-16T17:57:58,939 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 3609ad07831c,39733,1734371789085 2024-12-16T17:57:58,946 INFO [RS_OPEN_REGION-regionserver/3609ad07831c:0-0 {event_type=M_RS_OPEN_REGION, pid=95}] handler.AssignRegionHandler(135): Open TestAcidGuarantees,,1734371876313.5e6b04d28ec6af2428036942def2f402. 
2024-12-16T17:57:58,946 DEBUG [RS_OPEN_REGION-regionserver/3609ad07831c:0-0 {event_type=M_RS_OPEN_REGION, pid=95}] regionserver.HRegion(7285): Opening region: {ENCODED => 5e6b04d28ec6af2428036942def2f402, NAME => 'TestAcidGuarantees,,1734371876313.5e6b04d28ec6af2428036942def2f402.', STARTKEY => '', ENDKEY => ''} 2024-12-16T17:57:58,948 DEBUG [RS_OPEN_REGION-regionserver/3609ad07831c:0-0 {event_type=M_RS_OPEN_REGION, pid=95}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table TestAcidGuarantees 5e6b04d28ec6af2428036942def2f402 2024-12-16T17:57:58,948 DEBUG [RS_OPEN_REGION-regionserver/3609ad07831c:0-0 {event_type=M_RS_OPEN_REGION, pid=95}] regionserver.HRegion(894): Instantiated TestAcidGuarantees,,1734371876313.5e6b04d28ec6af2428036942def2f402.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-16T17:57:58,948 DEBUG [RS_OPEN_REGION-regionserver/3609ad07831c:0-0 {event_type=M_RS_OPEN_REGION, pid=95}] regionserver.HRegion(7327): checking encryption for 5e6b04d28ec6af2428036942def2f402 2024-12-16T17:57:58,948 DEBUG [RS_OPEN_REGION-regionserver/3609ad07831c:0-0 {event_type=M_RS_OPEN_REGION, pid=95}] regionserver.HRegion(7330): checking classloading for 5e6b04d28ec6af2428036942def2f402 2024-12-16T17:57:58,950 INFO [StoreOpener-5e6b04d28ec6af2428036942def2f402-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family A of region 5e6b04d28ec6af2428036942def2f402 2024-12-16T17:57:58,951 INFO [StoreOpener-5e6b04d28ec6af2428036942def2f402-1 {}] regionserver.CompactingMemStore(122): Store=A, in-memory flush size threshold=2.00 MB, immutable segments index type=CHUNK_MAP, compactor=ADAPTIVE, pipelineThreshold=2, compactionCellMax=10 2024-12-16T17:57:58,952 INFO [StoreOpener-5e6b04d28ec6af2428036942def2f402-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 5e6b04d28ec6af2428036942def2f402 columnFamilyName A 2024-12-16T17:57:58,953 DEBUG [StoreOpener-5e6b04d28ec6af2428036942def2f402-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-16T17:57:58,953 INFO [StoreOpener-5e6b04d28ec6af2428036942def2f402-1 {}] regionserver.HStore(327): Store=5e6b04d28ec6af2428036942def2f402/A, memstore type=CompactingMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-16T17:57:58,954 INFO [StoreOpener-5e6b04d28ec6af2428036942def2f402-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, 
cacheDataCompressed=false, prefetchOnOpen=false, for column family B of region 5e6b04d28ec6af2428036942def2f402 2024-12-16T17:57:58,955 INFO [StoreOpener-5e6b04d28ec6af2428036942def2f402-1 {}] regionserver.CompactingMemStore(122): Store=B, in-memory flush size threshold=2.00 MB, immutable segments index type=CHUNK_MAP, compactor=ADAPTIVE, pipelineThreshold=2, compactionCellMax=10 2024-12-16T17:57:58,955 INFO [StoreOpener-5e6b04d28ec6af2428036942def2f402-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 5e6b04d28ec6af2428036942def2f402 columnFamilyName B 2024-12-16T17:57:58,955 DEBUG [StoreOpener-5e6b04d28ec6af2428036942def2f402-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-16T17:57:58,955 INFO [StoreOpener-5e6b04d28ec6af2428036942def2f402-1 {}] regionserver.HStore(327): Store=5e6b04d28ec6af2428036942def2f402/B, memstore type=CompactingMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-16T17:57:58,956 INFO [StoreOpener-5e6b04d28ec6af2428036942def2f402-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family C of region 5e6b04d28ec6af2428036942def2f402 2024-12-16T17:57:58,956 INFO [StoreOpener-5e6b04d28ec6af2428036942def2f402-1 {}] regionserver.CompactingMemStore(122): Store=C, in-memory flush size threshold=2.00 MB, immutable segments index type=CHUNK_MAP, compactor=ADAPTIVE, pipelineThreshold=2, compactionCellMax=10 2024-12-16T17:57:58,957 INFO [StoreOpener-5e6b04d28ec6af2428036942def2f402-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 5e6b04d28ec6af2428036942def2f402 columnFamilyName C 2024-12-16T17:57:58,957 DEBUG [StoreOpener-5e6b04d28ec6af2428036942def2f402-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-16T17:57:58,957 INFO [StoreOpener-5e6b04d28ec6af2428036942def2f402-1 {}] regionserver.HStore(327): Store=5e6b04d28ec6af2428036942def2f402/C, memstore 
type=CompactingMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-16T17:57:58,957 INFO [RS_OPEN_REGION-regionserver/3609ad07831c:0-0 {event_type=M_RS_OPEN_REGION, pid=95}] regionserver.HRegion(1178): Setting FlushNonSloppyStoresFirstPolicy for the region=TestAcidGuarantees,,1734371876313.5e6b04d28ec6af2428036942def2f402. 2024-12-16T17:57:58,958 DEBUG [RS_OPEN_REGION-regionserver/3609ad07831c:0-0 {event_type=M_RS_OPEN_REGION, pid=95}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/5e6b04d28ec6af2428036942def2f402 2024-12-16T17:57:58,959 DEBUG [RS_OPEN_REGION-regionserver/3609ad07831c:0-0 {event_type=M_RS_OPEN_REGION, pid=95}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/5e6b04d28ec6af2428036942def2f402 2024-12-16T17:57:58,961 DEBUG [RS_OPEN_REGION-regionserver/3609ad07831c:0-0 {event_type=M_RS_OPEN_REGION, pid=95}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table TestAcidGuarantees descriptor;using region.getMemStoreFlushHeapSize/# of families (16.0 M)) instead. 2024-12-16T17:57:58,962 DEBUG [RS_OPEN_REGION-regionserver/3609ad07831c:0-0 {event_type=M_RS_OPEN_REGION, pid=95}] regionserver.HRegion(1085): writing seq id for 5e6b04d28ec6af2428036942def2f402 2024-12-16T17:57:58,963 INFO [RS_OPEN_REGION-regionserver/3609ad07831c:0-0 {event_type=M_RS_OPEN_REGION, pid=95}] regionserver.HRegion(1102): Opened 5e6b04d28ec6af2428036942def2f402; next sequenceid=5; ConstantSizeRegionSplitPolicy{desiredMaxFileSize=67769548, jitterRate=0.009844958782196045}, FlushLargeStoresPolicy{flushSizeLowerBound=16777216} 2024-12-16T17:57:58,963 DEBUG [RS_OPEN_REGION-regionserver/3609ad07831c:0-0 {event_type=M_RS_OPEN_REGION, pid=95}] regionserver.HRegion(1001): Region open journal for 5e6b04d28ec6af2428036942def2f402: 2024-12-16T17:57:58,964 INFO [RS_OPEN_REGION-regionserver/3609ad07831c:0-0 {event_type=M_RS_OPEN_REGION, pid=95}] regionserver.HRegionServer(2601): Post open deploy tasks for TestAcidGuarantees,,1734371876313.5e6b04d28ec6af2428036942def2f402., pid=95, masterSystemTime=1734371878939 2024-12-16T17:57:58,965 DEBUG [RS_OPEN_REGION-regionserver/3609ad07831c:0-0 {event_type=M_RS_OPEN_REGION, pid=95}] regionserver.HRegionServer(2628): Finished post open deploy task for TestAcidGuarantees,,1734371876313.5e6b04d28ec6af2428036942def2f402. 2024-12-16T17:57:58,965 INFO [RS_OPEN_REGION-regionserver/3609ad07831c:0-0 {event_type=M_RS_OPEN_REGION, pid=95}] handler.AssignRegionHandler(164): Opened TestAcidGuarantees,,1734371876313.5e6b04d28ec6af2428036942def2f402. 
2024-12-16T17:57:58,966 INFO [PEWorker-4 {}] assignment.RegionStateStore(202): pid=93 updating hbase:meta row=5e6b04d28ec6af2428036942def2f402, regionState=OPEN, openSeqNum=5, regionLocation=3609ad07831c,39733,1734371789085 2024-12-16T17:57:58,967 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=95, resume processing ppid=93 2024-12-16T17:57:58,967 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=95, ppid=93, state=SUCCESS; OpenRegionProcedure 5e6b04d28ec6af2428036942def2f402, server=3609ad07831c,39733,1734371789085 in 181 msec 2024-12-16T17:57:58,968 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=93, resume processing ppid=92 2024-12-16T17:57:58,968 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=93, ppid=92, state=SUCCESS; TransitRegionStateProcedure table=TestAcidGuarantees, region=5e6b04d28ec6af2428036942def2f402, REOPEN/MOVE in 501 msec 2024-12-16T17:57:58,969 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=92, resume processing ppid=91 2024-12-16T17:57:58,969 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=92, ppid=91, state=SUCCESS; ReopenTableRegionsProcedure table=TestAcidGuarantees in 503 msec 2024-12-16T17:57:58,970 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=91, state=SUCCESS; ModifyTableProcedure table=TestAcidGuarantees in 524 msec 2024-12-16T17:57:58,971 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38367 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=91 2024-12-16T17:57:58,972 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x098697cd to 127.0.0.1:49190 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@53f0a498 2024-12-16T17:57:59,000 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@3d669351, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-16T17:57:59,001 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x2f6b3f8c to 127.0.0.1:49190 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@79e98a9d 2024-12-16T17:57:59,022 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@701c62ea, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-16T17:57:59,023 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x63a1fd83 to 127.0.0.1:49190 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@56293e08 2024-12-16T17:57:59,039 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@45b11d15, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-16T17:57:59,039 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x3dfb3ff1 to 
127.0.0.1:49190 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@22bd9f38 2024-12-16T17:57:59,055 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@6f2967cf, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-16T17:57:59,056 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x0efb8efe to 127.0.0.1:49190 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@40f18446 2024-12-16T17:57:59,072 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@64c29488, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-16T17:57:59,073 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x49ac632a to 127.0.0.1:49190 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@393ad843 2024-12-16T17:57:59,089 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@10f2683, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-16T17:57:59,090 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x361d57a1 to 127.0.0.1:49190 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@598a69e1 2024-12-16T17:57:59,097 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@2149676d, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-16T17:57:59,098 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x772c186a to 127.0.0.1:49190 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@7de3b20f 2024-12-16T17:57:59,105 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@177bc0d1, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-16T17:57:59,107 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x2c68a792 to 127.0.0.1:49190 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@4c7b0bda 2024-12-16T17:57:59,114 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@54f27cc, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-16T17:57:59,115 DEBUG [Time-limited test {}] 
zookeeper.ReadOnlyZKClient(149): Connect 0x57e76ace to 127.0.0.1:49190 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@43d69e33 2024-12-16T17:57:59,130 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@3f5ba0c2, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-16T17:57:59,133 DEBUG [hconnection-0x11ca3151-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-16T17:57:59,133 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38367 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-12-16T17:57:59,134 DEBUG [hconnection-0x5d1ffbc9-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-16T17:57:59,134 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38367 {}] procedure2.ProcedureExecutor(1098): Stored pid=96, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=96, table=TestAcidGuarantees 2024-12-16T17:57:59,134 INFO [RS-EventLoopGroup-3-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:47356, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-16T17:57:59,135 INFO [RS-EventLoopGroup-3-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:47370, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-16T17:57:59,135 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38367 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=96 2024-12-16T17:57:59,135 INFO [PEWorker-1 {}] procedure.FlushTableProcedure(91): pid=96, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=96, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-12-16T17:57:59,135 INFO [PEWorker-1 {}] procedure.FlushTableProcedure(91): pid=96, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=96, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-12-16T17:57:59,136 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=97, ppid=96, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-12-16T17:57:59,139 DEBUG [hconnection-0x70e40eed-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-16T17:57:59,139 DEBUG [hconnection-0x1900e2d-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-16T17:57:59,139 DEBUG [hconnection-0x40424da4-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-16T17:57:59,139 DEBUG [hconnection-0x164e1471-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-16T17:57:59,140 INFO [RS-EventLoopGroup-3-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:47372, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), 
service=ClientService 2024-12-16T17:57:59,140 INFO [RS-EventLoopGroup-3-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:47374, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-16T17:57:59,140 INFO [RS-EventLoopGroup-3-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:47400, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-16T17:57:59,140 INFO [RS-EventLoopGroup-3-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:47416, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-16T17:57:59,140 DEBUG [hconnection-0x742f0a62-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-16T17:57:59,141 INFO [RS-EventLoopGroup-3-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:47422, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-16T17:57:59,144 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39733 {}] regionserver.HRegion(8581): Flush requested on 5e6b04d28ec6af2428036942def2f402 2024-12-16T17:57:59,144 DEBUG [hconnection-0x111403b-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-16T17:57:59,145 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 5e6b04d28ec6af2428036942def2f402 3/3 column families, dataSize=53.67 KB heapSize=141.38 KB 2024-12-16T17:57:59,145 DEBUG [hconnection-0xcd2da43-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-16T17:57:59,145 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 5e6b04d28ec6af2428036942def2f402, store=A 2024-12-16T17:57:59,145 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-16T17:57:59,145 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 5e6b04d28ec6af2428036942def2f402, store=B 2024-12-16T17:57:59,145 INFO [RS-EventLoopGroup-3-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:47436, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-16T17:57:59,145 INFO [RS-EventLoopGroup-3-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:47438, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-16T17:57:59,145 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-16T17:57:59,145 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 5e6b04d28ec6af2428036942def2f402, store=C 2024-12-16T17:57:59,145 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-16T17:57:59,152 DEBUG [hconnection-0x1b666f02-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-16T17:57:59,154 INFO [RS-EventLoopGroup-3-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:47462, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-16T17:57:59,161 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39733 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size 
limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5e6b04d28ec6af2428036942def2f402, server=3609ad07831c,39733,1734371789085 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-16T17:57:59,161 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39733 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5e6b04d28ec6af2428036942def2f402, server=3609ad07831c,39733,1734371789085 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-16T17:57:59,162 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39733 {}] ipc.CallRunner(138): callId: 14 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:47356 deadline: 1734371939161, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5e6b04d28ec6af2428036942def2f402, server=3609ad07831c,39733,1734371789085 2024-12-16T17:57:59,162 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39733 {}] ipc.CallRunner(138): callId: 5 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:47422 deadline: 1734371939161, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5e6b04d28ec6af2428036942def2f402, server=3609ad07831c,39733,1734371789085 2024-12-16T17:57:59,162 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39733 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5e6b04d28ec6af2428036942def2f402, server=3609ad07831c,39733,1734371789085 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-16T17:57:59,162 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39733 {}] ipc.CallRunner(138): callId: 8 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:47374 deadline: 1734371939161, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5e6b04d28ec6af2428036942def2f402, server=3609ad07831c,39733,1734371789085 2024-12-16T17:57:59,162 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39733 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5e6b04d28ec6af2428036942def2f402, server=3609ad07831c,39733,1734371789085 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-16T17:57:59,162 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39733 {}] ipc.CallRunner(138): callId: 7 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:47372 deadline: 1734371939162, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5e6b04d28ec6af2428036942def2f402, server=3609ad07831c,39733,1734371789085 2024-12-16T17:57:59,162 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39733 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5e6b04d28ec6af2428036942def2f402, server=3609ad07831c,39733,1734371789085 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-16T17:57:59,162 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39733 {}] ipc.CallRunner(138): callId: 7 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:47436 deadline: 1734371939162, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5e6b04d28ec6af2428036942def2f402, server=3609ad07831c,39733,1734371789085 2024-12-16T17:57:59,185 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241216c3ed0994eda04006a45121d220f3866a_5e6b04d28ec6af2428036942def2f402 is 50, key is test_row_0/A:col10/1734371879144/Put/seqid=0 2024-12-16T17:57:59,203 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41817 is added to blk_1073742183_1359 (size=12154) 2024-12-16T17:57:59,206 DEBUG [MemStoreFlusher.0 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-16T17:57:59,209 INFO [MemStoreFlusher.0 {}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241216c3ed0994eda04006a45121d220f3866a_5e6b04d28ec6af2428036942def2f402 to hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241216c3ed0994eda04006a45121d220f3866a_5e6b04d28ec6af2428036942def2f402 2024-12-16T17:57:59,210 DEBUG [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/5e6b04d28ec6af2428036942def2f402/.tmp/A/4d537f7d32ca4a95982b66a8a2c30b1b, store: [table=TestAcidGuarantees family=A region=5e6b04d28ec6af2428036942def2f402] 2024-12-16T17:57:59,211 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/5e6b04d28ec6af2428036942def2f402/.tmp/A/4d537f7d32ca4a95982b66a8a2c30b1b is 175, key is test_row_0/A:col10/1734371879144/Put/seqid=0 2024-12-16T17:57:59,228 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41817 is added to blk_1073742184_1360 (size=30955) 2024-12-16T17:57:59,236 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38367 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=96 2024-12-16T17:57:59,263 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39733 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5e6b04d28ec6af2428036942def2f402, server=3609ad07831c,39733,1734371789085 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-16T17:57:59,263 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39733 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5e6b04d28ec6af2428036942def2f402, server=3609ad07831c,39733,1734371789085 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-16T17:57:59,263 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39733 {}] ipc.CallRunner(138): callId: 10 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:47374 deadline: 1734371939263, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5e6b04d28ec6af2428036942def2f402, server=3609ad07831c,39733,1734371789085 2024-12-16T17:57:59,263 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39733 {}] ipc.CallRunner(138): callId: 7 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:47422 deadline: 1734371939263, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5e6b04d28ec6af2428036942def2f402, server=3609ad07831c,39733,1734371789085 2024-12-16T17:57:59,263 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39733 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5e6b04d28ec6af2428036942def2f402, server=3609ad07831c,39733,1734371789085 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-16T17:57:59,263 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39733 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5e6b04d28ec6af2428036942def2f402, server=3609ad07831c,39733,1734371789085 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-16T17:57:59,264 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39733 {}] ipc.CallRunner(138): callId: 16 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:47356 deadline: 1734371939263, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5e6b04d28ec6af2428036942def2f402, server=3609ad07831c,39733,1734371789085 2024-12-16T17:57:59,264 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39733 {}] ipc.CallRunner(138): callId: 9 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:47372 deadline: 1734371939263, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5e6b04d28ec6af2428036942def2f402, server=3609ad07831c,39733,1734371789085 2024-12-16T17:57:59,264 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39733 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5e6b04d28ec6af2428036942def2f402, server=3609ad07831c,39733,1734371789085 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-16T17:57:59,264 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39733 {}] ipc.CallRunner(138): callId: 9 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:47436 deadline: 1734371939263, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5e6b04d28ec6af2428036942def2f402, server=3609ad07831c,39733,1734371789085 2024-12-16T17:57:59,287 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 3609ad07831c,39733,1734371789085 2024-12-16T17:57:59,287 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=39733 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=97 2024-12-16T17:57:59,287 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-0 {event_type=RS_FLUSH_REGIONS, pid=97}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1734371876313.5e6b04d28ec6af2428036942def2f402. 2024-12-16T17:57:59,288 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-0 {event_type=RS_FLUSH_REGIONS, pid=97}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1734371876313.5e6b04d28ec6af2428036942def2f402. as already flushing 2024-12-16T17:57:59,288 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-0 {event_type=RS_FLUSH_REGIONS, pid=97}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1734371876313.5e6b04d28ec6af2428036942def2f402. 2024-12-16T17:57:59,288 ERROR [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-0 {event_type=RS_FLUSH_REGIONS, pid=97}] handler.RSProcedureHandler(58): pid=97 java.io.IOException: Unable to complete flush {ENCODED => 5e6b04d28ec6af2428036942def2f402, NAME => 'TestAcidGuarantees,,1734371876313.5e6b04d28ec6af2428036942def2f402.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-16T17:57:59,288 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-0 {event_type=RS_FLUSH_REGIONS, pid=97}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=97 java.io.IOException: Unable to complete flush {ENCODED => 5e6b04d28ec6af2428036942def2f402, NAME => 'TestAcidGuarantees,,1734371876313.5e6b04d28ec6af2428036942def2f402.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-16T17:57:59,288 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38367 {}] master.HMaster(4114): Remote procedure failed, pid=97 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 5e6b04d28ec6af2428036942def2f402, NAME => 'TestAcidGuarantees,,1734371876313.5e6b04d28ec6af2428036942def2f402.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 5e6b04d28ec6af2428036942def2f402, NAME => 'TestAcidGuarantees,,1734371876313.5e6b04d28ec6af2428036942def2f402.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-16T17:57:59,436 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38367 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=96 2024-12-16T17:57:59,440 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 3609ad07831c,39733,1734371789085 2024-12-16T17:57:59,440 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=39733 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=97 2024-12-16T17:57:59,440 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-1 {event_type=RS_FLUSH_REGIONS, pid=97}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1734371876313.5e6b04d28ec6af2428036942def2f402. 2024-12-16T17:57:59,440 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-1 {event_type=RS_FLUSH_REGIONS, pid=97}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1734371876313.5e6b04d28ec6af2428036942def2f402. as already flushing 2024-12-16T17:57:59,440 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-1 {event_type=RS_FLUSH_REGIONS, pid=97}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1734371876313.5e6b04d28ec6af2428036942def2f402. 2024-12-16T17:57:59,440 ERROR [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-1 {event_type=RS_FLUSH_REGIONS, pid=97}] handler.RSProcedureHandler(58): pid=97 java.io.IOException: Unable to complete flush {ENCODED => 5e6b04d28ec6af2428036942def2f402, NAME => 'TestAcidGuarantees,,1734371876313.5e6b04d28ec6af2428036942def2f402.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-16T17:57:59,441 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-1 {event_type=RS_FLUSH_REGIONS, pid=97}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=97 java.io.IOException: Unable to complete flush {ENCODED => 5e6b04d28ec6af2428036942def2f402, NAME => 'TestAcidGuarantees,,1734371876313.5e6b04d28ec6af2428036942def2f402.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-16T17:57:59,441 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38367 {}] master.HMaster(4114): Remote procedure failed, pid=97 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 5e6b04d28ec6af2428036942def2f402, NAME => 'TestAcidGuarantees,,1734371876313.5e6b04d28ec6af2428036942def2f402.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 5e6b04d28ec6af2428036942def2f402, NAME => 'TestAcidGuarantees,,1734371876313.5e6b04d28ec6af2428036942def2f402.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-16T17:57:59,467 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39733 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5e6b04d28ec6af2428036942def2f402, server=3609ad07831c,39733,1734371789085 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-16T17:57:59,467 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39733 {}] ipc.CallRunner(138): callId: 11 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:47436 deadline: 1734371939464, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5e6b04d28ec6af2428036942def2f402, server=3609ad07831c,39733,1734371789085 2024-12-16T17:57:59,468 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39733 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5e6b04d28ec6af2428036942def2f402, server=3609ad07831c,39733,1734371789085 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-16T17:57:59,468 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39733 {}] ipc.CallRunner(138): callId: 12 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:47374 deadline: 1734371939464, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5e6b04d28ec6af2428036942def2f402, server=3609ad07831c,39733,1734371789085 2024-12-16T17:57:59,468 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39733 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5e6b04d28ec6af2428036942def2f402, server=3609ad07831c,39733,1734371789085 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-16T17:57:59,468 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39733 {}] ipc.CallRunner(138): callId: 9 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:47422 deadline: 1734371939465, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5e6b04d28ec6af2428036942def2f402, server=3609ad07831c,39733,1734371789085 2024-12-16T17:57:59,468 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39733 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5e6b04d28ec6af2428036942def2f402, server=3609ad07831c,39733,1734371789085 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-16T17:57:59,468 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39733 {}] ipc.CallRunner(138): callId: 11 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:47372 deadline: 1734371939465, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5e6b04d28ec6af2428036942def2f402, server=3609ad07831c,39733,1734371789085 2024-12-16T17:57:59,468 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39733 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5e6b04d28ec6af2428036942def2f402, server=3609ad07831c,39733,1734371789085 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-16T17:57:59,469 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39733 {}] ipc.CallRunner(138): callId: 18 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:47356 deadline: 1734371939465, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5e6b04d28ec6af2428036942def2f402, server=3609ad07831c,39733,1734371789085 2024-12-16T17:57:59,592 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 3609ad07831c,39733,1734371789085 2024-12-16T17:57:59,593 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=39733 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=97 2024-12-16T17:57:59,593 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-2 {event_type=RS_FLUSH_REGIONS, pid=97}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1734371876313.5e6b04d28ec6af2428036942def2f402. 2024-12-16T17:57:59,593 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-2 {event_type=RS_FLUSH_REGIONS, pid=97}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1734371876313.5e6b04d28ec6af2428036942def2f402. as already flushing 2024-12-16T17:57:59,593 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-2 {event_type=RS_FLUSH_REGIONS, pid=97}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1734371876313.5e6b04d28ec6af2428036942def2f402. 2024-12-16T17:57:59,593 ERROR [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-2 {event_type=RS_FLUSH_REGIONS, pid=97}] handler.RSProcedureHandler(58): pid=97 java.io.IOException: Unable to complete flush {ENCODED => 5e6b04d28ec6af2428036942def2f402, NAME => 'TestAcidGuarantees,,1734371876313.5e6b04d28ec6af2428036942def2f402.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-16T17:57:59,593 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-2 {event_type=RS_FLUSH_REGIONS, pid=97}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=97 java.io.IOException: Unable to complete flush {ENCODED => 5e6b04d28ec6af2428036942def2f402, NAME => 'TestAcidGuarantees,,1734371876313.5e6b04d28ec6af2428036942def2f402.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-16T17:57:59,594 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38367 {}] master.HMaster(4114): Remote procedure failed, pid=97 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 5e6b04d28ec6af2428036942def2f402, NAME => 'TestAcidGuarantees,,1734371876313.5e6b04d28ec6af2428036942def2f402.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 5e6b04d28ec6af2428036942def2f402, NAME => 'TestAcidGuarantees,,1734371876313.5e6b04d28ec6af2428036942def2f402.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-16T17:57:59,629 INFO [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=18, memsize=24.6 K, hasBloomFilter=true, into tmp file hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/5e6b04d28ec6af2428036942def2f402/.tmp/A/4d537f7d32ca4a95982b66a8a2c30b1b 2024-12-16T17:57:59,652 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/5e6b04d28ec6af2428036942def2f402/.tmp/B/2e8420cf5f2f4939a168ac84e01b764f is 50, key is test_row_0/B:col10/1734371879144/Put/seqid=0 2024-12-16T17:57:59,655 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41817 is added to blk_1073742185_1361 (size=12001) 2024-12-16T17:57:59,656 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=24.60 KB at sequenceid=18 (bloomFilter=true), to=hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/5e6b04d28ec6af2428036942def2f402/.tmp/B/2e8420cf5f2f4939a168ac84e01b764f 2024-12-16T17:57:59,678 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/5e6b04d28ec6af2428036942def2f402/.tmp/C/2d7fa735d35d4245995f7015ad10ba94 is 50, key is test_row_0/C:col10/1734371879144/Put/seqid=0 2024-12-16T17:57:59,694 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41817 is added to blk_1073742186_1362 (size=12001) 2024-12-16T17:57:59,737 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38367 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=96 2024-12-16T17:57:59,745 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 3609ad07831c,39733,1734371789085 2024-12-16T17:57:59,745 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=39733 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=97 2024-12-16T17:57:59,745 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-0 {event_type=RS_FLUSH_REGIONS, pid=97}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1734371876313.5e6b04d28ec6af2428036942def2f402. 2024-12-16T17:57:59,745 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-0 {event_type=RS_FLUSH_REGIONS, pid=97}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1734371876313.5e6b04d28ec6af2428036942def2f402. as already flushing 2024-12-16T17:57:59,745 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-0 {event_type=RS_FLUSH_REGIONS, pid=97}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1734371876313.5e6b04d28ec6af2428036942def2f402. 
2024-12-16T17:57:59,745 ERROR [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-0 {event_type=RS_FLUSH_REGIONS, pid=97}] handler.RSProcedureHandler(58): pid=97 java.io.IOException: Unable to complete flush {ENCODED => 5e6b04d28ec6af2428036942def2f402, NAME => 'TestAcidGuarantees,,1734371876313.5e6b04d28ec6af2428036942def2f402.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-16T17:57:59,745 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-0 {event_type=RS_FLUSH_REGIONS, pid=97}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=97 java.io.IOException: Unable to complete flush {ENCODED => 5e6b04d28ec6af2428036942def2f402, NAME => 'TestAcidGuarantees,,1734371876313.5e6b04d28ec6af2428036942def2f402.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-16T17:57:59,746 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38367 {}] master.HMaster(4114): Remote procedure failed, pid=97 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 5e6b04d28ec6af2428036942def2f402, NAME => 'TestAcidGuarantees,,1734371876313.5e6b04d28ec6af2428036942def2f402.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 5e6b04d28ec6af2428036942def2f402, NAME => 'TestAcidGuarantees,,1734371876313.5e6b04d28ec6af2428036942def2f402.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-16T17:57:59,772 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39733 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5e6b04d28ec6af2428036942def2f402, server=3609ad07831c,39733,1734371789085 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-16T17:57:59,773 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39733 {}] ipc.CallRunner(138): callId: 11 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:47422 deadline: 1734371939769, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5e6b04d28ec6af2428036942def2f402, server=3609ad07831c,39733,1734371789085 2024-12-16T17:57:59,773 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39733 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5e6b04d28ec6af2428036942def2f402, server=3609ad07831c,39733,1734371789085 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-16T17:57:59,773 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39733 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5e6b04d28ec6af2428036942def2f402, server=3609ad07831c,39733,1734371789085 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-16T17:57:59,773 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39733 {}] ipc.CallRunner(138): callId: 13 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:47436 deadline: 1734371939769, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5e6b04d28ec6af2428036942def2f402, server=3609ad07831c,39733,1734371789085 2024-12-16T17:57:59,773 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39733 {}] ipc.CallRunner(138): callId: 14 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:47374 deadline: 1734371939770, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5e6b04d28ec6af2428036942def2f402, server=3609ad07831c,39733,1734371789085 2024-12-16T17:57:59,773 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39733 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5e6b04d28ec6af2428036942def2f402, server=3609ad07831c,39733,1734371789085 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-16T17:57:59,773 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39733 {}] ipc.CallRunner(138): callId: 13 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:47372 deadline: 1734371939771, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5e6b04d28ec6af2428036942def2f402, server=3609ad07831c,39733,1734371789085 2024-12-16T17:57:59,774 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39733 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5e6b04d28ec6af2428036942def2f402, server=3609ad07831c,39733,1734371789085 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-16T17:57:59,774 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39733 {}] ipc.CallRunner(138): callId: 20 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:47356 deadline: 1734371939771, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5e6b04d28ec6af2428036942def2f402, server=3609ad07831c,39733,1734371789085 2024-12-16T17:57:59,897 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 3609ad07831c,39733,1734371789085 2024-12-16T17:57:59,897 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=39733 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=97 2024-12-16T17:57:59,897 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-1 {event_type=RS_FLUSH_REGIONS, pid=97}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1734371876313.5e6b04d28ec6af2428036942def2f402. 2024-12-16T17:57:59,898 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-1 {event_type=RS_FLUSH_REGIONS, pid=97}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1734371876313.5e6b04d28ec6af2428036942def2f402. as already flushing 2024-12-16T17:57:59,898 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-1 {event_type=RS_FLUSH_REGIONS, pid=97}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1734371876313.5e6b04d28ec6af2428036942def2f402. 2024-12-16T17:57:59,898 ERROR [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-1 {event_type=RS_FLUSH_REGIONS, pid=97}] handler.RSProcedureHandler(58): pid=97 java.io.IOException: Unable to complete flush {ENCODED => 5e6b04d28ec6af2428036942def2f402, NAME => 'TestAcidGuarantees,,1734371876313.5e6b04d28ec6af2428036942def2f402.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-16T17:57:59,898 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-1 {event_type=RS_FLUSH_REGIONS, pid=97}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=97 java.io.IOException: Unable to complete flush {ENCODED => 5e6b04d28ec6af2428036942def2f402, NAME => 'TestAcidGuarantees,,1734371876313.5e6b04d28ec6af2428036942def2f402.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-16T17:57:59,898 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38367 {}] master.HMaster(4114): Remote procedure failed, pid=97 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 5e6b04d28ec6af2428036942def2f402, NAME => 'TestAcidGuarantees,,1734371876313.5e6b04d28ec6af2428036942def2f402.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 5e6b04d28ec6af2428036942def2f402, NAME => 'TestAcidGuarantees,,1734371876313.5e6b04d28ec6af2428036942def2f402.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-16T17:58:00,049 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 3609ad07831c,39733,1734371789085 2024-12-16T17:58:00,050 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=39733 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=97 2024-12-16T17:58:00,050 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-2 {event_type=RS_FLUSH_REGIONS, pid=97}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1734371876313.5e6b04d28ec6af2428036942def2f402. 2024-12-16T17:58:00,050 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-2 {event_type=RS_FLUSH_REGIONS, pid=97}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1734371876313.5e6b04d28ec6af2428036942def2f402. as already flushing 2024-12-16T17:58:00,050 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-2 {event_type=RS_FLUSH_REGIONS, pid=97}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1734371876313.5e6b04d28ec6af2428036942def2f402. 2024-12-16T17:58:00,050 ERROR [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-2 {event_type=RS_FLUSH_REGIONS, pid=97}] handler.RSProcedureHandler(58): pid=97 java.io.IOException: Unable to complete flush {ENCODED => 5e6b04d28ec6af2428036942def2f402, NAME => 'TestAcidGuarantees,,1734371876313.5e6b04d28ec6af2428036942def2f402.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-16T17:58:00,050 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-2 {event_type=RS_FLUSH_REGIONS, pid=97}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=97 java.io.IOException: Unable to complete flush {ENCODED => 5e6b04d28ec6af2428036942def2f402, NAME => 'TestAcidGuarantees,,1734371876313.5e6b04d28ec6af2428036942def2f402.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-16T17:58:00,051 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38367 {}] master.HMaster(4114): Remote procedure failed, pid=97 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 5e6b04d28ec6af2428036942def2f402, NAME => 'TestAcidGuarantees,,1734371876313.5e6b04d28ec6af2428036942def2f402.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 5e6b04d28ec6af2428036942def2f402, NAME => 'TestAcidGuarantees,,1734371876313.5e6b04d28ec6af2428036942def2f402.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-16T17:58:00,094 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=24.60 KB at sequenceid=18 (bloomFilter=true), to=hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/5e6b04d28ec6af2428036942def2f402/.tmp/C/2d7fa735d35d4245995f7015ad10ba94 2024-12-16T17:58:00,098 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/5e6b04d28ec6af2428036942def2f402/.tmp/A/4d537f7d32ca4a95982b66a8a2c30b1b as hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/5e6b04d28ec6af2428036942def2f402/A/4d537f7d32ca4a95982b66a8a2c30b1b 2024-12-16T17:58:00,101 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/5e6b04d28ec6af2428036942def2f402/A/4d537f7d32ca4a95982b66a8a2c30b1b, entries=150, sequenceid=18, filesize=30.2 K 2024-12-16T17:58:00,102 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/5e6b04d28ec6af2428036942def2f402/.tmp/B/2e8420cf5f2f4939a168ac84e01b764f as hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/5e6b04d28ec6af2428036942def2f402/B/2e8420cf5f2f4939a168ac84e01b764f 2024-12-16T17:58:00,105 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/5e6b04d28ec6af2428036942def2f402/B/2e8420cf5f2f4939a168ac84e01b764f, entries=150, sequenceid=18, 
filesize=11.7 K 2024-12-16T17:58:00,106 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/5e6b04d28ec6af2428036942def2f402/.tmp/C/2d7fa735d35d4245995f7015ad10ba94 as hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/5e6b04d28ec6af2428036942def2f402/C/2d7fa735d35d4245995f7015ad10ba94 2024-12-16T17:58:00,110 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/5e6b04d28ec6af2428036942def2f402/C/2d7fa735d35d4245995f7015ad10ba94, entries=150, sequenceid=18, filesize=11.7 K 2024-12-16T17:58:00,111 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~73.80 KB/75570, heapSize ~194.06 KB/198720, currentSize=134.18 KB/137400 for 5e6b04d28ec6af2428036942def2f402 in 967ms, sequenceid=18, compaction requested=false 2024-12-16T17:58:00,111 DEBUG [MemStoreFlusher.0 {}] regionserver.MetricsTableSourceImpl(133): Creating new MetricsTableSourceImpl for table 'TestAcidGuarantees' 2024-12-16T17:58:00,112 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 5e6b04d28ec6af2428036942def2f402: 2024-12-16T17:58:00,202 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 3609ad07831c,39733,1734371789085 2024-12-16T17:58:00,202 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=39733 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=97 2024-12-16T17:58:00,202 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-0 {event_type=RS_FLUSH_REGIONS, pid=97}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1734371876313.5e6b04d28ec6af2428036942def2f402. 
2024-12-16T17:58:00,202 INFO [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-0 {event_type=RS_FLUSH_REGIONS, pid=97}] regionserver.HRegion(2837): Flushing 5e6b04d28ec6af2428036942def2f402 3/3 column families, dataSize=134.18 KB heapSize=352.31 KB 2024-12-16T17:58:00,203 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-0 {event_type=RS_FLUSH_REGIONS, pid=97}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 5e6b04d28ec6af2428036942def2f402, store=A 2024-12-16T17:58:00,203 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-0 {event_type=RS_FLUSH_REGIONS, pid=97}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-16T17:58:00,203 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-0 {event_type=RS_FLUSH_REGIONS, pid=97}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 5e6b04d28ec6af2428036942def2f402, store=B 2024-12-16T17:58:00,203 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-0 {event_type=RS_FLUSH_REGIONS, pid=97}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-16T17:58:00,203 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-0 {event_type=RS_FLUSH_REGIONS, pid=97}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 5e6b04d28ec6af2428036942def2f402, store=C 2024-12-16T17:58:00,203 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-0 {event_type=RS_FLUSH_REGIONS, pid=97}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-16T17:58:00,209 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-0 {event_type=RS_FLUSH_REGIONS, pid=97}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241216a01d3f929dc2480eac10ab8af18a8c5f_5e6b04d28ec6af2428036942def2f402 is 50, key is test_row_0/A:col10/1734371879160/Put/seqid=0 2024-12-16T17:58:00,213 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41817 is added to blk_1073742187_1363 (size=12154) 2024-12-16T17:58:00,238 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38367 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=96 2024-12-16T17:58:00,275 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39733 {}] regionserver.HRegion(8581): Flush requested on 5e6b04d28ec6af2428036942def2f402 2024-12-16T17:58:00,275 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1734371876313.5e6b04d28ec6af2428036942def2f402. as already flushing 2024-12-16T17:58:00,283 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39733 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5e6b04d28ec6af2428036942def2f402, server=3609ad07831c,39733,1734371789085 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-16T17:58:00,283 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39733 {}] ipc.CallRunner(138): callId: 18 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:47374 deadline: 1734371940280, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5e6b04d28ec6af2428036942def2f402, server=3609ad07831c,39733,1734371789085 2024-12-16T17:58:00,284 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39733 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5e6b04d28ec6af2428036942def2f402, server=3609ad07831c,39733,1734371789085 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-16T17:58:00,284 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39733 {}] ipc.CallRunner(138): callId: 17 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:47436 deadline: 1734371940280, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5e6b04d28ec6af2428036942def2f402, server=3609ad07831c,39733,1734371789085 2024-12-16T17:58:00,284 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39733 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5e6b04d28ec6af2428036942def2f402, server=3609ad07831c,39733,1734371789085 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-16T17:58:00,284 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39733 {}] ipc.CallRunner(138): callId: 16 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:47372 deadline: 1734371940281, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5e6b04d28ec6af2428036942def2f402, server=3609ad07831c,39733,1734371789085 2024-12-16T17:58:00,284 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39733 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5e6b04d28ec6af2428036942def2f402, server=3609ad07831c,39733,1734371789085 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-16T17:58:00,284 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39733 {}] ipc.CallRunner(138): callId: 24 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:47356 deadline: 1734371940282, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5e6b04d28ec6af2428036942def2f402, server=3609ad07831c,39733,1734371789085 2024-12-16T17:58:00,285 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39733 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5e6b04d28ec6af2428036942def2f402, server=3609ad07831c,39733,1734371789085 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-16T17:58:00,285 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39733 {}] ipc.CallRunner(138): callId: 16 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:47422 deadline: 1734371940283, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5e6b04d28ec6af2428036942def2f402, server=3609ad07831c,39733,1734371789085 2024-12-16T17:58:00,387 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39733 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5e6b04d28ec6af2428036942def2f402, server=3609ad07831c,39733,1734371789085 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-16T17:58:00,387 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39733 {}] ipc.CallRunner(138): callId: 20 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:47374 deadline: 1734371940384, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5e6b04d28ec6af2428036942def2f402, server=3609ad07831c,39733,1734371789085 2024-12-16T17:58:00,388 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39733 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5e6b04d28ec6af2428036942def2f402, server=3609ad07831c,39733,1734371789085 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-16T17:58:00,388 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39733 {}] ipc.CallRunner(138): callId: 19 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:47436 deadline: 1734371940384, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5e6b04d28ec6af2428036942def2f402, server=3609ad07831c,39733,1734371789085 2024-12-16T17:58:00,388 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39733 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5e6b04d28ec6af2428036942def2f402, server=3609ad07831c,39733,1734371789085 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-16T17:58:00,388 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39733 {}] ipc.CallRunner(138): callId: 18 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:47372 deadline: 1734371940385, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5e6b04d28ec6af2428036942def2f402, server=3609ad07831c,39733,1734371789085 2024-12-16T17:58:00,388 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39733 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5e6b04d28ec6af2428036942def2f402, server=3609ad07831c,39733,1734371789085 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-16T17:58:00,388 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39733 {}] ipc.CallRunner(138): callId: 26 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:47356 deadline: 1734371940385, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5e6b04d28ec6af2428036942def2f402, server=3609ad07831c,39733,1734371789085 2024-12-16T17:58:00,388 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39733 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5e6b04d28ec6af2428036942def2f402, server=3609ad07831c,39733,1734371789085 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-16T17:58:00,388 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39733 {}] ipc.CallRunner(138): callId: 18 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:47422 deadline: 1734371940386, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5e6b04d28ec6af2428036942def2f402, server=3609ad07831c,39733,1734371789085 2024-12-16T17:58:00,591 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39733 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5e6b04d28ec6af2428036942def2f402, server=3609ad07831c,39733,1734371789085 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-16T17:58:00,591 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39733 {}] ipc.CallRunner(138): callId: 22 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:47374 deadline: 1734371940589, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5e6b04d28ec6af2428036942def2f402, server=3609ad07831c,39733,1734371789085 2024-12-16T17:58:00,591 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39733 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5e6b04d28ec6af2428036942def2f402, server=3609ad07831c,39733,1734371789085 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-16T17:58:00,592 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39733 {}] ipc.CallRunner(138): callId: 21 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:47436 deadline: 1734371940590, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5e6b04d28ec6af2428036942def2f402, server=3609ad07831c,39733,1734371789085 2024-12-16T17:58:00,592 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39733 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5e6b04d28ec6af2428036942def2f402, server=3609ad07831c,39733,1734371789085 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-16T17:58:00,592 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39733 {}] ipc.CallRunner(138): callId: 20 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:47372 deadline: 1734371940590, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5e6b04d28ec6af2428036942def2f402, server=3609ad07831c,39733,1734371789085 2024-12-16T17:58:00,592 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39733 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5e6b04d28ec6af2428036942def2f402, server=3609ad07831c,39733,1734371789085 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-16T17:58:00,592 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39733 {}] ipc.CallRunner(138): callId: 28 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:47356 deadline: 1734371940590, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5e6b04d28ec6af2428036942def2f402, server=3609ad07831c,39733,1734371789085 2024-12-16T17:58:00,592 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39733 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5e6b04d28ec6af2428036942def2f402, server=3609ad07831c,39733,1734371789085 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-16T17:58:00,592 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39733 {}] ipc.CallRunner(138): callId: 20 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:47422 deadline: 1734371940591, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5e6b04d28ec6af2428036942def2f402, server=3609ad07831c,39733,1734371789085 2024-12-16T17:58:00,614 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-0 {event_type=RS_FLUSH_REGIONS, pid=97}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-16T17:58:00,620 INFO [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-0 {event_type=RS_FLUSH_REGIONS, pid=97}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241216a01d3f929dc2480eac10ab8af18a8c5f_5e6b04d28ec6af2428036942def2f402 to hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241216a01d3f929dc2480eac10ab8af18a8c5f_5e6b04d28ec6af2428036942def2f402 2024-12-16T17:58:00,620 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-0 {event_type=RS_FLUSH_REGIONS, pid=97}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/5e6b04d28ec6af2428036942def2f402/.tmp/A/38300227c1724ad59d9680ea2858ef85, store: [table=TestAcidGuarantees family=A region=5e6b04d28ec6af2428036942def2f402] 2024-12-16T17:58:00,621 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-0 {event_type=RS_FLUSH_REGIONS, pid=97}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/5e6b04d28ec6af2428036942def2f402/.tmp/A/38300227c1724ad59d9680ea2858ef85 is 175, key is test_row_0/A:col10/1734371879160/Put/seqid=0 2024-12-16T17:58:00,625 INFO [Block report 
processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41817 is added to blk_1073742188_1364 (size=30955) 2024-12-16T17:58:00,626 INFO [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-0 {event_type=RS_FLUSH_REGIONS, pid=97}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=41, memsize=44.7 K, hasBloomFilter=true, into tmp file hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/5e6b04d28ec6af2428036942def2f402/.tmp/A/38300227c1724ad59d9680ea2858ef85 2024-12-16T17:58:00,636 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-0 {event_type=RS_FLUSH_REGIONS, pid=97}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/5e6b04d28ec6af2428036942def2f402/.tmp/B/8a2baffa7ac4499bb68475f5e98c5d84 is 50, key is test_row_0/B:col10/1734371879160/Put/seqid=0 2024-12-16T17:58:00,653 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41817 is added to blk_1073742189_1365 (size=12001) 2024-12-16T17:58:00,654 INFO [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-0 {event_type=RS_FLUSH_REGIONS, pid=97}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=44.73 KB at sequenceid=41 (bloomFilter=true), to=hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/5e6b04d28ec6af2428036942def2f402/.tmp/B/8a2baffa7ac4499bb68475f5e98c5d84 2024-12-16T17:58:00,669 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-0 {event_type=RS_FLUSH_REGIONS, pid=97}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/5e6b04d28ec6af2428036942def2f402/.tmp/C/8d52d9feda8941c9874a5e7a4cc797a1 is 50, key is test_row_0/C:col10/1734371879160/Put/seqid=0 2024-12-16T17:58:00,682 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41817 is added to blk_1073742190_1366 (size=12001) 2024-12-16T17:58:00,682 INFO [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-0 {event_type=RS_FLUSH_REGIONS, pid=97}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=44.73 KB at sequenceid=41 (bloomFilter=true), to=hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/5e6b04d28ec6af2428036942def2f402/.tmp/C/8d52d9feda8941c9874a5e7a4cc797a1 2024-12-16T17:58:00,686 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-0 {event_type=RS_FLUSH_REGIONS, pid=97}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/5e6b04d28ec6af2428036942def2f402/.tmp/A/38300227c1724ad59d9680ea2858ef85 as hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/5e6b04d28ec6af2428036942def2f402/A/38300227c1724ad59d9680ea2858ef85 2024-12-16T17:58:00,690 INFO [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-0 {event_type=RS_FLUSH_REGIONS, pid=97}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/5e6b04d28ec6af2428036942def2f402/A/38300227c1724ad59d9680ea2858ef85, entries=150, sequenceid=41, filesize=30.2 K 
2024-12-16T17:58:00,692 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-0 {event_type=RS_FLUSH_REGIONS, pid=97}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/5e6b04d28ec6af2428036942def2f402/.tmp/B/8a2baffa7ac4499bb68475f5e98c5d84 as hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/5e6b04d28ec6af2428036942def2f402/B/8a2baffa7ac4499bb68475f5e98c5d84 2024-12-16T17:58:00,703 INFO [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-0 {event_type=RS_FLUSH_REGIONS, pid=97}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/5e6b04d28ec6af2428036942def2f402/B/8a2baffa7ac4499bb68475f5e98c5d84, entries=150, sequenceid=41, filesize=11.7 K 2024-12-16T17:58:00,705 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-0 {event_type=RS_FLUSH_REGIONS, pid=97}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/5e6b04d28ec6af2428036942def2f402/.tmp/C/8d52d9feda8941c9874a5e7a4cc797a1 as hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/5e6b04d28ec6af2428036942def2f402/C/8d52d9feda8941c9874a5e7a4cc797a1 2024-12-16T17:58:00,710 INFO [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-0 {event_type=RS_FLUSH_REGIONS, pid=97}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/5e6b04d28ec6af2428036942def2f402/C/8d52d9feda8941c9874a5e7a4cc797a1, entries=150, sequenceid=41, filesize=11.7 K 2024-12-16T17:58:00,711 INFO [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-0 {event_type=RS_FLUSH_REGIONS, pid=97}] regionserver.HRegion(3040): Finished flush of dataSize ~134.18 KB/137400, heapSize ~352.27 KB/360720, currentSize=67.09 KB/68700 for 5e6b04d28ec6af2428036942def2f402 in 509ms, sequenceid=41, compaction requested=false 2024-12-16T17:58:00,711 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-0 {event_type=RS_FLUSH_REGIONS, pid=97}] regionserver.HRegion(2538): Flush status journal for 5e6b04d28ec6af2428036942def2f402: 2024-12-16T17:58:00,711 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-0 {event_type=RS_FLUSH_REGIONS, pid=97}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1734371876313.5e6b04d28ec6af2428036942def2f402. 
2024-12-16T17:58:00,711 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-0 {event_type=RS_FLUSH_REGIONS, pid=97}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=97 2024-12-16T17:58:00,712 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38367 {}] master.HMaster(4106): Remote procedure done, pid=97 2024-12-16T17:58:00,716 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=97, resume processing ppid=96 2024-12-16T17:58:00,716 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=97, ppid=96, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 1.5770 sec 2024-12-16T17:58:00,718 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=96, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=96, table=TestAcidGuarantees in 1.5840 sec 2024-12-16T17:58:00,895 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39733 {}] regionserver.HRegion(8581): Flush requested on 5e6b04d28ec6af2428036942def2f402 2024-12-16T17:58:00,896 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 5e6b04d28ec6af2428036942def2f402 3/3 column families, dataSize=73.80 KB heapSize=194.11 KB 2024-12-16T17:58:00,896 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 5e6b04d28ec6af2428036942def2f402, store=A 2024-12-16T17:58:00,896 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-16T17:58:00,896 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 5e6b04d28ec6af2428036942def2f402, store=B 2024-12-16T17:58:00,896 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-16T17:58:00,896 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 5e6b04d28ec6af2428036942def2f402, store=C 2024-12-16T17:58:00,896 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-16T17:58:00,916 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241216a22712a1c2da43d6afc255f4512ad2a0_5e6b04d28ec6af2428036942def2f402 is 50, key is test_row_0/A:col10/1734371880282/Put/seqid=0 2024-12-16T17:58:00,921 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41817 is added to blk_1073742191_1367 (size=14594) 2024-12-16T17:58:00,933 DEBUG [MemStoreFlusher.0 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-16T17:58:00,936 INFO [MemStoreFlusher.0 {}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241216a22712a1c2da43d6afc255f4512ad2a0_5e6b04d28ec6af2428036942def2f402 to hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241216a22712a1c2da43d6afc255f4512ad2a0_5e6b04d28ec6af2428036942def2f402 2024-12-16T17:58:00,937 DEBUG [MemStoreFlusher.0 {}] 
mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/5e6b04d28ec6af2428036942def2f402/.tmp/A/5fc6f2d804d94ef6b05d6b8ca71339a1, store: [table=TestAcidGuarantees family=A region=5e6b04d28ec6af2428036942def2f402] 2024-12-16T17:58:00,938 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/5e6b04d28ec6af2428036942def2f402/.tmp/A/5fc6f2d804d94ef6b05d6b8ca71339a1 is 175, key is test_row_0/A:col10/1734371880282/Put/seqid=0 2024-12-16T17:58:00,942 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41817 is added to blk_1073742192_1368 (size=39549) 2024-12-16T17:58:00,965 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39733 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5e6b04d28ec6af2428036942def2f402, server=3609ad07831c,39733,1734371789085 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-16T17:58:00,965 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39733 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5e6b04d28ec6af2428036942def2f402, server=3609ad07831c,39733,1734371789085 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-16T17:58:00,966 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39733 {}] ipc.CallRunner(138): callId: 25 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:47422 deadline: 1734371940953, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5e6b04d28ec6af2428036942def2f402, server=3609ad07831c,39733,1734371789085 2024-12-16T17:58:00,966 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39733 {}] ipc.CallRunner(138): callId: 28 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:47374 deadline: 1734371940956, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5e6b04d28ec6af2428036942def2f402, server=3609ad07831c,39733,1734371789085 2024-12-16T17:58:00,966 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39733 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5e6b04d28ec6af2428036942def2f402, server=3609ad07831c,39733,1734371789085 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-16T17:58:00,966 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39733 {}] ipc.CallRunner(138): callId: 26 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:47372 deadline: 1734371940960, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5e6b04d28ec6af2428036942def2f402, server=3609ad07831c,39733,1734371789085 2024-12-16T17:58:00,974 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39733 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5e6b04d28ec6af2428036942def2f402, server=3609ad07831c,39733,1734371789085 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-16T17:58:00,974 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39733 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5e6b04d28ec6af2428036942def2f402, server=3609ad07831c,39733,1734371789085 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-16T17:58:00,975 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39733 {}] ipc.CallRunner(138): callId: 27 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:47436 deadline: 1734371940966, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5e6b04d28ec6af2428036942def2f402, server=3609ad07831c,39733,1734371789085 2024-12-16T17:58:00,975 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39733 {}] ipc.CallRunner(138): callId: 35 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:47356 deadline: 1734371940966, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5e6b04d28ec6af2428036942def2f402, server=3609ad07831c,39733,1734371789085 2024-12-16T17:58:01,071 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39733 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5e6b04d28ec6af2428036942def2f402, server=3609ad07831c,39733,1734371789085 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-16T17:58:01,071 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39733 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5e6b04d28ec6af2428036942def2f402, server=3609ad07831c,39733,1734371789085 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-16T17:58:01,072 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39733 {}] ipc.CallRunner(138): callId: 30 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:47374 deadline: 1734371941067, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5e6b04d28ec6af2428036942def2f402, server=3609ad07831c,39733,1734371789085 2024-12-16T17:58:01,072 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39733 {}] ipc.CallRunner(138): callId: 27 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:47422 deadline: 1734371941067, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5e6b04d28ec6af2428036942def2f402, server=3609ad07831c,39733,1734371789085 2024-12-16T17:58:01,072 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39733 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5e6b04d28ec6af2428036942def2f402, server=3609ad07831c,39733,1734371789085 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-16T17:58:01,072 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39733 {}] ipc.CallRunner(138): callId: 28 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:47372 deadline: 1734371941067, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5e6b04d28ec6af2428036942def2f402, server=3609ad07831c,39733,1734371789085 2024-12-16T17:58:01,078 WARN [HBase-Metrics2-1 {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-hbase.properties,hadoop-metrics2.properties 2024-12-16T17:58:01,079 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39733 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5e6b04d28ec6af2428036942def2f402, server=3609ad07831c,39733,1734371789085 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-16T17:58:01,079 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39733 {}] ipc.CallRunner(138): callId: 29 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:47436 deadline: 1734371941076, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5e6b04d28ec6af2428036942def2f402, server=3609ad07831c,39733,1734371789085 2024-12-16T17:58:01,079 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39733 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5e6b04d28ec6af2428036942def2f402, server=3609ad07831c,39733,1734371789085 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-16T17:58:01,079 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39733 {}] ipc.CallRunner(138): callId: 37 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:47356 deadline: 1734371941076, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5e6b04d28ec6af2428036942def2f402, server=3609ad07831c,39733,1734371789085 2024-12-16T17:58:01,245 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38367 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=96 2024-12-16T17:58:01,245 INFO [Thread-1619 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 96 completed 2024-12-16T17:58:01,253 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38367 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-12-16T17:58:01,254 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38367 {}] procedure2.ProcedureExecutor(1098): Stored pid=98, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=98, table=TestAcidGuarantees 2024-12-16T17:58:01,255 INFO [PEWorker-1 {}] procedure.FlushTableProcedure(91): pid=98, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=98, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-12-16T17:58:01,255 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38367 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=98 2024-12-16T17:58:01,255 INFO [PEWorker-1 {}] procedure.FlushTableProcedure(91): pid=98, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=98, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-12-16T17:58:01,256 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=99, ppid=98, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-12-16T17:58:01,278 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39733 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5e6b04d28ec6af2428036942def2f402, server=3609ad07831c,39733,1734371789085 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-16T17:58:01,278 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39733 {}] ipc.CallRunner(138): callId: 29 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:47422 deadline: 1734371941273, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5e6b04d28ec6af2428036942def2f402, server=3609ad07831c,39733,1734371789085 2024-12-16T17:58:01,278 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39733 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5e6b04d28ec6af2428036942def2f402, server=3609ad07831c,39733,1734371789085 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-16T17:58:01,278 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39733 {}] ipc.CallRunner(138): callId: 32 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:47374 deadline: 1734371941273, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5e6b04d28ec6af2428036942def2f402, server=3609ad07831c,39733,1734371789085 2024-12-16T17:58:01,279 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39733 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5e6b04d28ec6af2428036942def2f402, server=3609ad07831c,39733,1734371789085 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-16T17:58:01,279 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39733 {}] ipc.CallRunner(138): callId: 30 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:47372 deadline: 1734371941274, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5e6b04d28ec6af2428036942def2f402, server=3609ad07831c,39733,1734371789085 2024-12-16T17:58:01,285 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39733 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5e6b04d28ec6af2428036942def2f402, server=3609ad07831c,39733,1734371789085 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-16T17:58:01,285 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39733 {}] ipc.CallRunner(138): callId: 39 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:47356 deadline: 1734371941280, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5e6b04d28ec6af2428036942def2f402, server=3609ad07831c,39733,1734371789085 2024-12-16T17:58:01,285 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39733 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5e6b04d28ec6af2428036942def2f402, server=3609ad07831c,39733,1734371789085 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-16T17:58:01,285 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39733 {}] ipc.CallRunner(138): callId: 31 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:47436 deadline: 1734371941281, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5e6b04d28ec6af2428036942def2f402, server=3609ad07831c,39733,1734371789085 2024-12-16T17:58:01,342 INFO [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=56, memsize=26.8 K, hasBloomFilter=true, into tmp file hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/5e6b04d28ec6af2428036942def2f402/.tmp/A/5fc6f2d804d94ef6b05d6b8ca71339a1 2024-12-16T17:58:01,356 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38367 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=98 2024-12-16T17:58:01,365 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/5e6b04d28ec6af2428036942def2f402/.tmp/B/60be5871b2414931a931a63f37183210 is 50, key is test_row_0/B:col10/1734371880282/Put/seqid=0 2024-12-16T17:58:01,380 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41817 is added to blk_1073742193_1369 (size=12001) 2024-12-16T17:58:01,380 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=26.84 KB at sequenceid=56 (bloomFilter=true), to=hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/5e6b04d28ec6af2428036942def2f402/.tmp/B/60be5871b2414931a931a63f37183210 2024-12-16T17:58:01,403 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/5e6b04d28ec6af2428036942def2f402/.tmp/C/6e2bb988e30a4629a84c26f8a9086745 is 50, key is test_row_0/C:col10/1734371880282/Put/seqid=0 2024-12-16T17:58:01,407 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 3609ad07831c,39733,1734371789085 2024-12-16T17:58:01,407 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=39733 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=99 2024-12-16T17:58:01,408 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-1 {event_type=RS_FLUSH_REGIONS, pid=99}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1734371876313.5e6b04d28ec6af2428036942def2f402. 
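[Editor's note] The repeated RegionTooBusyException entries above are HRegion.checkResources() rejecting writes while this region's memstore is over its blocking limit (logged here as "Over memstore limit=512.0 K"; server-side that limit is hbase.hregion.memstore.flush.size multiplied by hbase.hregion.memstore.block.multiplier, evidently configured very small for this test). Clients are expected to back off and retry until the in-flight flush frees memory. Below is a minimal illustrative sketch, not part of the test itself, assuming the standard HBase 2.x client API and the table/row/family names visible in this log; the retry and pause values are arbitrary examples.

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;
    import org.apache.hadoop.hbase.client.Put;
    import org.apache.hadoop.hbase.client.Table;
    import org.apache.hadoop.hbase.util.Bytes;

    public class RetryTunedPut {
        public static void main(String[] args) throws Exception {
            Configuration conf = HBaseConfiguration.create();
            // Give the client more headroom to retry while the hot region is blocked
            // on "Over memstore limit"; the values below are illustrative only.
            conf.setInt("hbase.client.retries.number", 15);
            conf.setLong("hbase.client.pause", 200L); // base pause in ms between retries

            try (Connection conn = ConnectionFactory.createConnection(conf);
                 Table table = conn.getTable(TableName.valueOf("TestAcidGuarantees"))) {
                Put put = new Put(Bytes.toBytes("test_row_0"));
                put.addColumn(Bytes.toBytes("A"), Bytes.toBytes("col10"), Bytes.toBytes("value"));
                // RegionTooBusyException is treated as retriable by the client library:
                // it backs off and retries until the operation deadline is exceeded,
                // which is when the caller sees the exception surfaced as in this log.
                table.put(put);
            }
        }
    }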
2024-12-16T17:58:01,408 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-1 {event_type=RS_FLUSH_REGIONS, pid=99}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1734371876313.5e6b04d28ec6af2428036942def2f402. as already flushing 2024-12-16T17:58:01,408 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-1 {event_type=RS_FLUSH_REGIONS, pid=99}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1734371876313.5e6b04d28ec6af2428036942def2f402. 2024-12-16T17:58:01,408 ERROR [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-1 {event_type=RS_FLUSH_REGIONS, pid=99}] handler.RSProcedureHandler(58): pid=99 java.io.IOException: Unable to complete flush {ENCODED => 5e6b04d28ec6af2428036942def2f402, NAME => 'TestAcidGuarantees,,1734371876313.5e6b04d28ec6af2428036942def2f402.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-16T17:58:01,408 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-1 {event_type=RS_FLUSH_REGIONS, pid=99}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=99 java.io.IOException: Unable to complete flush {ENCODED => 5e6b04d28ec6af2428036942def2f402, NAME => 'TestAcidGuarantees,,1734371876313.5e6b04d28ec6af2428036942def2f402.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-16T17:58:01,409 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41817 is added to blk_1073742194_1370 (size=12001) 2024-12-16T17:58:01,409 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38367 {}] master.HMaster(4114): Remote procedure failed, pid=99 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 5e6b04d28ec6af2428036942def2f402, NAME => 'TestAcidGuarantees,,1734371876313.5e6b04d28ec6af2428036942def2f402.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 5e6b04d28ec6af2428036942def2f402, NAME => 'TestAcidGuarantees,,1734371876313.5e6b04d28ec6af2428036942def2f402.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-16T17:58:01,557 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38367 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=98 2024-12-16T17:58:01,560 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 3609ad07831c,39733,1734371789085 2024-12-16T17:58:01,560 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=39733 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=99 2024-12-16T17:58:01,560 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-2 {event_type=RS_FLUSH_REGIONS, pid=99}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1734371876313.5e6b04d28ec6af2428036942def2f402. 
2024-12-16T17:58:01,560 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-2 {event_type=RS_FLUSH_REGIONS, pid=99}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1734371876313.5e6b04d28ec6af2428036942def2f402. as already flushing 2024-12-16T17:58:01,561 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-2 {event_type=RS_FLUSH_REGIONS, pid=99}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1734371876313.5e6b04d28ec6af2428036942def2f402. 2024-12-16T17:58:01,561 ERROR [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-2 {event_type=RS_FLUSH_REGIONS, pid=99}] handler.RSProcedureHandler(58): pid=99 java.io.IOException: Unable to complete flush {ENCODED => 5e6b04d28ec6af2428036942def2f402, NAME => 'TestAcidGuarantees,,1734371876313.5e6b04d28ec6af2428036942def2f402.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-16T17:58:01,561 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-2 {event_type=RS_FLUSH_REGIONS, pid=99}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=99 java.io.IOException: Unable to complete flush {ENCODED => 5e6b04d28ec6af2428036942def2f402, NAME => 'TestAcidGuarantees,,1734371876313.5e6b04d28ec6af2428036942def2f402.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
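[Editor's note] The "NOT flushing ... as already flushing" and "Unable to complete flush" entries above show the master's FlushTableProcedure (pid=98) dispatching a FlushRegionCallable (pid=99) while the MemStoreFlusher is still writing the same region; the remote call fails and the master simply re-dispatches it until the in-flight flush finishes. For context, a table flush like the one behind procId 96/98 can be requested with the Admin API; the sketch below is an assumption-laden illustration (standard 2.x Admin interface, default connection settings), not the test's own code.

    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;

    public class FlushTableExample {
        public static void main(String[] args) throws Exception {
            try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create());
                 Admin admin = conn.getAdmin()) {
                // Submits a flush of the table on the master (compare the
                // FlushTableProcedure pid=96/98 entries above). If a region is already
                // flushing, the per-region callable is retried until it can proceed.
                admin.flush(TableName.valueOf("TestAcidGuarantees"));
            }
        }
    }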
2024-12-16T17:58:01,561 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38367 {}] master.HMaster(4114): Remote procedure failed, pid=99 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 5e6b04d28ec6af2428036942def2f402, NAME => 'TestAcidGuarantees,,1734371876313.5e6b04d28ec6af2428036942def2f402.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 5e6b04d28ec6af2428036942def2f402, NAME => 'TestAcidGuarantees,,1734371876313.5e6b04d28ec6af2428036942def2f402.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-16T17:58:01,583 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39733 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5e6b04d28ec6af2428036942def2f402, server=3609ad07831c,39733,1734371789085 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-16T17:58:01,583 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39733 {}] ipc.CallRunner(138): callId: 32 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:47372 deadline: 1734371941579, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5e6b04d28ec6af2428036942def2f402, server=3609ad07831c,39733,1734371789085 2024-12-16T17:58:01,584 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39733 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5e6b04d28ec6af2428036942def2f402, server=3609ad07831c,39733,1734371789085 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-16T17:58:01,584 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39733 {}] ipc.CallRunner(138): callId: 31 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:47422 deadline: 1734371941580, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5e6b04d28ec6af2428036942def2f402, server=3609ad07831c,39733,1734371789085 2024-12-16T17:58:01,584 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39733 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5e6b04d28ec6af2428036942def2f402, server=3609ad07831c,39733,1734371789085 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-16T17:58:01,584 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39733 {}] ipc.CallRunner(138): callId: 34 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:47374 deadline: 1734371941581, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5e6b04d28ec6af2428036942def2f402, server=3609ad07831c,39733,1734371789085 2024-12-16T17:58:01,589 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39733 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5e6b04d28ec6af2428036942def2f402, server=3609ad07831c,39733,1734371789085 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-16T17:58:01,589 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39733 {}] ipc.CallRunner(138): callId: 41 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:47356 deadline: 1734371941586, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5e6b04d28ec6af2428036942def2f402, server=3609ad07831c,39733,1734371789085 2024-12-16T17:58:01,590 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39733 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5e6b04d28ec6af2428036942def2f402, server=3609ad07831c,39733,1734371789085 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-16T17:58:01,590 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39733 {}] ipc.CallRunner(138): callId: 33 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:47436 deadline: 1734371941587, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5e6b04d28ec6af2428036942def2f402, server=3609ad07831c,39733,1734371789085 2024-12-16T17:58:01,712 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 3609ad07831c,39733,1734371789085 2024-12-16T17:58:01,713 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=39733 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=99 2024-12-16T17:58:01,713 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-0 {event_type=RS_FLUSH_REGIONS, pid=99}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1734371876313.5e6b04d28ec6af2428036942def2f402. 2024-12-16T17:58:01,713 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-0 {event_type=RS_FLUSH_REGIONS, pid=99}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1734371876313.5e6b04d28ec6af2428036942def2f402. as already flushing 2024-12-16T17:58:01,713 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-0 {event_type=RS_FLUSH_REGIONS, pid=99}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1734371876313.5e6b04d28ec6af2428036942def2f402. 2024-12-16T17:58:01,713 ERROR [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-0 {event_type=RS_FLUSH_REGIONS, pid=99}] handler.RSProcedureHandler(58): pid=99 java.io.IOException: Unable to complete flush {ENCODED => 5e6b04d28ec6af2428036942def2f402, NAME => 'TestAcidGuarantees,,1734371876313.5e6b04d28ec6af2428036942def2f402.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-16T17:58:01,713 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-0 {event_type=RS_FLUSH_REGIONS, pid=99}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=99 java.io.IOException: Unable to complete flush {ENCODED => 5e6b04d28ec6af2428036942def2f402, NAME => 'TestAcidGuarantees,,1734371876313.5e6b04d28ec6af2428036942def2f402.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-16T17:58:01,714 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38367 {}] master.HMaster(4114): Remote procedure failed, pid=99 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 5e6b04d28ec6af2428036942def2f402, NAME => 'TestAcidGuarantees,,1734371876313.5e6b04d28ec6af2428036942def2f402.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 5e6b04d28ec6af2428036942def2f402, NAME => 'TestAcidGuarantees,,1734371876313.5e6b04d28ec6af2428036942def2f402.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-16T17:58:01,809 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=26.84 KB at sequenceid=56 (bloomFilter=true), to=hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/5e6b04d28ec6af2428036942def2f402/.tmp/C/6e2bb988e30a4629a84c26f8a9086745 2024-12-16T17:58:01,813 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/5e6b04d28ec6af2428036942def2f402/.tmp/A/5fc6f2d804d94ef6b05d6b8ca71339a1 as hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/5e6b04d28ec6af2428036942def2f402/A/5fc6f2d804d94ef6b05d6b8ca71339a1 2024-12-16T17:58:01,816 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/5e6b04d28ec6af2428036942def2f402/A/5fc6f2d804d94ef6b05d6b8ca71339a1, entries=200, sequenceid=56, filesize=38.6 K 2024-12-16T17:58:01,817 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/5e6b04d28ec6af2428036942def2f402/.tmp/B/60be5871b2414931a931a63f37183210 as hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/5e6b04d28ec6af2428036942def2f402/B/60be5871b2414931a931a63f37183210 2024-12-16T17:58:01,820 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/5e6b04d28ec6af2428036942def2f402/B/60be5871b2414931a931a63f37183210, entries=150, sequenceid=56, 
filesize=11.7 K 2024-12-16T17:58:01,821 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/5e6b04d28ec6af2428036942def2f402/.tmp/C/6e2bb988e30a4629a84c26f8a9086745 as hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/5e6b04d28ec6af2428036942def2f402/C/6e2bb988e30a4629a84c26f8a9086745 2024-12-16T17:58:01,824 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/5e6b04d28ec6af2428036942def2f402/C/6e2bb988e30a4629a84c26f8a9086745, entries=150, sequenceid=56, filesize=11.7 K 2024-12-16T17:58:01,825 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~80.51 KB/82440, heapSize ~211.64 KB/216720, currentSize=120.76 KB/123660 for 5e6b04d28ec6af2428036942def2f402 in 930ms, sequenceid=56, compaction requested=true 2024-12-16T17:58:01,825 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 5e6b04d28ec6af2428036942def2f402: 2024-12-16T17:58:01,826 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 5e6b04d28ec6af2428036942def2f402:A, priority=-2147483648, current under compaction store size is 1 2024-12-16T17:58:01,826 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-16T17:58:01,826 DEBUG [RS:0;3609ad07831c:39733-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-16T17:58:01,826 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 5e6b04d28ec6af2428036942def2f402:B, priority=-2147483648, current under compaction store size is 2 2024-12-16T17:58:01,826 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-16T17:58:01,826 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 5e6b04d28ec6af2428036942def2f402:C, priority=-2147483648, current under compaction store size is 3 2024-12-16T17:58:01,826 DEBUG [RS:0;3609ad07831c:39733-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-16T17:58:01,826 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-16T17:58:01,826 DEBUG [RS:0;3609ad07831c:39733-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 36003 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-16T17:58:01,826 DEBUG [RS:0;3609ad07831c:39733-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 101459 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-16T17:58:01,827 DEBUG [RS:0;3609ad07831c:39733-longCompactions-0 {}] regionserver.HStore(1540): 
5e6b04d28ec6af2428036942def2f402/B is initiating minor compaction (all files) 2024-12-16T17:58:01,827 DEBUG [RS:0;3609ad07831c:39733-shortCompactions-0 {}] regionserver.HStore(1540): 5e6b04d28ec6af2428036942def2f402/A is initiating minor compaction (all files) 2024-12-16T17:58:01,827 INFO [RS:0;3609ad07831c:39733-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 5e6b04d28ec6af2428036942def2f402/A in TestAcidGuarantees,,1734371876313.5e6b04d28ec6af2428036942def2f402. 2024-12-16T17:58:01,827 INFO [RS:0;3609ad07831c:39733-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 5e6b04d28ec6af2428036942def2f402/B in TestAcidGuarantees,,1734371876313.5e6b04d28ec6af2428036942def2f402. 2024-12-16T17:58:01,827 INFO [RS:0;3609ad07831c:39733-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/5e6b04d28ec6af2428036942def2f402/A/4d537f7d32ca4a95982b66a8a2c30b1b, hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/5e6b04d28ec6af2428036942def2f402/A/38300227c1724ad59d9680ea2858ef85, hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/5e6b04d28ec6af2428036942def2f402/A/5fc6f2d804d94ef6b05d6b8ca71339a1] into tmpdir=hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/5e6b04d28ec6af2428036942def2f402/.tmp, totalSize=99.1 K 2024-12-16T17:58:01,827 INFO [RS:0;3609ad07831c:39733-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/5e6b04d28ec6af2428036942def2f402/B/2e8420cf5f2f4939a168ac84e01b764f, hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/5e6b04d28ec6af2428036942def2f402/B/8a2baffa7ac4499bb68475f5e98c5d84, hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/5e6b04d28ec6af2428036942def2f402/B/60be5871b2414931a931a63f37183210] into tmpdir=hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/5e6b04d28ec6af2428036942def2f402/.tmp, totalSize=35.2 K 2024-12-16T17:58:01,827 INFO [RS:0;3609ad07831c:39733-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(181): MOB compaction: major=false isAll=true priority=13 throughput controller=DefaultCompactionThroughputController [maxThroughput=50.00 MB/second, activeCompactions=0] table=TestAcidGuarantees cf=A region=TestAcidGuarantees,,1734371876313.5e6b04d28ec6af2428036942def2f402. 2024-12-16T17:58:01,827 DEBUG [RS:0;3609ad07831c:39733-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(191): MOB compaction table=TestAcidGuarantees cf=A region=TestAcidGuarantees,,1734371876313.5e6b04d28ec6af2428036942def2f402. 
files: [hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/5e6b04d28ec6af2428036942def2f402/A/4d537f7d32ca4a95982b66a8a2c30b1b, hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/5e6b04d28ec6af2428036942def2f402/A/38300227c1724ad59d9680ea2858ef85, hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/5e6b04d28ec6af2428036942def2f402/A/5fc6f2d804d94ef6b05d6b8ca71339a1] 2024-12-16T17:58:01,827 DEBUG [RS:0;3609ad07831c:39733-longCompactions-0 {}] compactions.Compactor(224): Compacting 2e8420cf5f2f4939a168ac84e01b764f, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=18, earliestPutTs=1734371879143 2024-12-16T17:58:01,827 DEBUG [RS:0;3609ad07831c:39733-shortCompactions-0 {}] compactions.Compactor(224): Compacting 4d537f7d32ca4a95982b66a8a2c30b1b, keycount=150, bloomtype=ROW, size=30.2 K, encoding=NONE, compression=NONE, seqNum=18, earliestPutTs=1734371879143 2024-12-16T17:58:01,827 DEBUG [RS:0;3609ad07831c:39733-longCompactions-0 {}] compactions.Compactor(224): Compacting 8a2baffa7ac4499bb68475f5e98c5d84, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=41, earliestPutTs=1734371879159 2024-12-16T17:58:01,828 DEBUG [RS:0;3609ad07831c:39733-longCompactions-0 {}] compactions.Compactor(224): Compacting 60be5871b2414931a931a63f37183210, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=56, earliestPutTs=1734371880282 2024-12-16T17:58:01,829 DEBUG [RS:0;3609ad07831c:39733-shortCompactions-0 {}] compactions.Compactor(224): Compacting 38300227c1724ad59d9680ea2858ef85, keycount=150, bloomtype=ROW, size=30.2 K, encoding=NONE, compression=NONE, seqNum=41, earliestPutTs=1734371879159 2024-12-16T17:58:01,829 DEBUG [RS:0;3609ad07831c:39733-shortCompactions-0 {}] compactions.Compactor(224): Compacting 5fc6f2d804d94ef6b05d6b8ca71339a1, keycount=200, bloomtype=ROW, size=38.6 K, encoding=NONE, compression=NONE, seqNum=56, earliestPutTs=1734371880280 2024-12-16T17:58:01,835 INFO [RS:0;3609ad07831c:39733-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 5e6b04d28ec6af2428036942def2f402#B#compaction#318 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-12-16T17:58:01,836 INFO [RS:0;3609ad07831c:39733-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(322): Compact MOB=false optimized configured=false optimized enabled=false maximum MOB file size=1073741824 major=true store=[table=TestAcidGuarantees family=A region=5e6b04d28ec6af2428036942def2f402] 2024-12-16T17:58:01,836 DEBUG [RS:0;3609ad07831c:39733-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/5e6b04d28ec6af2428036942def2f402/.tmp/B/cdc5532d91be4200bd2896affeee65bf is 50, key is test_row_0/B:col10/1734371880282/Put/seqid=0 2024-12-16T17:58:01,841 DEBUG [RS:0;3609ad07831c:39733-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(626): New MOB writer created=d41d8cd98f00b204e9800998ecf8427e20241216f8346032219348b49390a1fe93f11139_5e6b04d28ec6af2428036942def2f402 store=[table=TestAcidGuarantees family=A region=5e6b04d28ec6af2428036942def2f402] 2024-12-16T17:58:01,843 DEBUG [RS:0;3609ad07831c:39733-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(647): Commit or abort size=0 mobCells=0 major=true file=d41d8cd98f00b204e9800998ecf8427e20241216f8346032219348b49390a1fe93f11139_5e6b04d28ec6af2428036942def2f402, store=[table=TestAcidGuarantees family=A region=5e6b04d28ec6af2428036942def2f402] 2024-12-16T17:58:01,843 DEBUG [RS:0;3609ad07831c:39733-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(658): Aborting writer for hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241216f8346032219348b49390a1fe93f11139_5e6b04d28ec6af2428036942def2f402 because there are no MOB cells, store=[table=TestAcidGuarantees family=A region=5e6b04d28ec6af2428036942def2f402] 2024-12-16T17:58:01,850 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41817 is added to blk_1073742195_1371 (size=12104) 2024-12-16T17:58:01,852 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41817 is added to blk_1073742196_1372 (size=4469) 2024-12-16T17:58:01,859 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38367 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=98 2024-12-16T17:58:01,860 DEBUG [RS:0;3609ad07831c:39733-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/5e6b04d28ec6af2428036942def2f402/.tmp/B/cdc5532d91be4200bd2896affeee65bf as hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/5e6b04d28ec6af2428036942def2f402/B/cdc5532d91be4200bd2896affeee65bf 2024-12-16T17:58:01,864 INFO [RS:0;3609ad07831c:39733-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 5e6b04d28ec6af2428036942def2f402/B of 5e6b04d28ec6af2428036942def2f402 into cdc5532d91be4200bd2896affeee65bf(size=11.8 K), total size for store is 11.8 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-12-16T17:58:01,864 DEBUG [RS:0;3609ad07831c:39733-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 5e6b04d28ec6af2428036942def2f402: 2024-12-16T17:58:01,864 INFO [RS:0;3609ad07831c:39733-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1734371876313.5e6b04d28ec6af2428036942def2f402., storeName=5e6b04d28ec6af2428036942def2f402/B, priority=13, startTime=1734371881826; duration=0sec 2024-12-16T17:58:01,864 DEBUG [RS:0;3609ad07831c:39733-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-16T17:58:01,864 DEBUG [RS:0;3609ad07831c:39733-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 5e6b04d28ec6af2428036942def2f402:B 2024-12-16T17:58:01,864 DEBUG [RS:0;3609ad07831c:39733-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-16T17:58:01,865 DEBUG [RS:0;3609ad07831c:39733-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 36003 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-16T17:58:01,865 DEBUG [RS:0;3609ad07831c:39733-longCompactions-0 {}] regionserver.HStore(1540): 5e6b04d28ec6af2428036942def2f402/C is initiating minor compaction (all files) 2024-12-16T17:58:01,865 INFO [RS:0;3609ad07831c:39733-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 5e6b04d28ec6af2428036942def2f402/C in TestAcidGuarantees,,1734371876313.5e6b04d28ec6af2428036942def2f402. 2024-12-16T17:58:01,865 INFO [RS:0;3609ad07831c:39733-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/5e6b04d28ec6af2428036942def2f402/C/2d7fa735d35d4245995f7015ad10ba94, hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/5e6b04d28ec6af2428036942def2f402/C/8d52d9feda8941c9874a5e7a4cc797a1, hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/5e6b04d28ec6af2428036942def2f402/C/6e2bb988e30a4629a84c26f8a9086745] into tmpdir=hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/5e6b04d28ec6af2428036942def2f402/.tmp, totalSize=35.2 K 2024-12-16T17:58:01,865 DEBUG [RS:0;3609ad07831c:39733-longCompactions-0 {}] compactions.Compactor(224): Compacting 2d7fa735d35d4245995f7015ad10ba94, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=18, earliestPutTs=1734371879143 2024-12-16T17:58:01,865 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 3609ad07831c,39733,1734371789085 2024-12-16T17:58:01,865 DEBUG [RS:0;3609ad07831c:39733-longCompactions-0 {}] compactions.Compactor(224): Compacting 8d52d9feda8941c9874a5e7a4cc797a1, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=41, earliestPutTs=1734371879159 2024-12-16T17:58:01,865 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=39733 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=99 2024-12-16T17:58:01,865 
DEBUG [RS:0;3609ad07831c:39733-longCompactions-0 {}] compactions.Compactor(224): Compacting 6e2bb988e30a4629a84c26f8a9086745, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=56, earliestPutTs=1734371880282 2024-12-16T17:58:01,865 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-1 {event_type=RS_FLUSH_REGIONS, pid=99}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1734371876313.5e6b04d28ec6af2428036942def2f402. 2024-12-16T17:58:01,865 INFO [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-1 {event_type=RS_FLUSH_REGIONS, pid=99}] regionserver.HRegion(2837): Flushing 5e6b04d28ec6af2428036942def2f402 3/3 column families, dataSize=120.76 KB heapSize=317.16 KB 2024-12-16T17:58:01,866 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-1 {event_type=RS_FLUSH_REGIONS, pid=99}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 5e6b04d28ec6af2428036942def2f402, store=A 2024-12-16T17:58:01,866 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-1 {event_type=RS_FLUSH_REGIONS, pid=99}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-16T17:58:01,866 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-1 {event_type=RS_FLUSH_REGIONS, pid=99}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 5e6b04d28ec6af2428036942def2f402, store=B 2024-12-16T17:58:01,866 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-1 {event_type=RS_FLUSH_REGIONS, pid=99}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-16T17:58:01,866 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-1 {event_type=RS_FLUSH_REGIONS, pid=99}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 5e6b04d28ec6af2428036942def2f402, store=C 2024-12-16T17:58:01,866 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-1 {event_type=RS_FLUSH_REGIONS, pid=99}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-16T17:58:01,872 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-1 {event_type=RS_FLUSH_REGIONS, pid=99}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241216efcbed4337fd488c9ca7a6af6fb5efef_5e6b04d28ec6af2428036942def2f402 is 50, key is test_row_0/A:col10/1734371880930/Put/seqid=0 2024-12-16T17:58:01,876 INFO [RS:0;3609ad07831c:39733-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 5e6b04d28ec6af2428036942def2f402#C#compaction#321 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 
1 active operations remaining, total limit is 50.00 MB/second 2024-12-16T17:58:01,877 DEBUG [RS:0;3609ad07831c:39733-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/5e6b04d28ec6af2428036942def2f402/.tmp/C/ac3ef196633e405c95ae4d1518ac1940 is 50, key is test_row_0/C:col10/1734371880282/Put/seqid=0 2024-12-16T17:58:01,887 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41817 is added to blk_1073742197_1373 (size=12154) 2024-12-16T17:58:01,908 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41817 is added to blk_1073742198_1374 (size=12104) 2024-12-16T17:58:02,088 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39733 {}] regionserver.HRegion(8581): Flush requested on 5e6b04d28ec6af2428036942def2f402 2024-12-16T17:58:02,088 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1734371876313.5e6b04d28ec6af2428036942def2f402. as already flushing 2024-12-16T17:58:02,129 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39733 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5e6b04d28ec6af2428036942def2f402, server=3609ad07831c,39733,1734371789085 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-16T17:58:02,129 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39733 {}] ipc.CallRunner(138): callId: 39 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:47374 deadline: 1734371942124, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5e6b04d28ec6af2428036942def2f402, server=3609ad07831c,39733,1734371789085 2024-12-16T17:58:02,129 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39733 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5e6b04d28ec6af2428036942def2f402, server=3609ad07831c,39733,1734371789085 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-16T17:58:02,129 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39733 {}] ipc.CallRunner(138): callId: 35 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:47422 deadline: 1734371942124, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5e6b04d28ec6af2428036942def2f402, server=3609ad07831c,39733,1734371789085 2024-12-16T17:58:02,131 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39733 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5e6b04d28ec6af2428036942def2f402, server=3609ad07831c,39733,1734371789085 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-16T17:58:02,131 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39733 {}] ipc.CallRunner(138): callId: 37 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:47372 deadline: 1734371942125, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5e6b04d28ec6af2428036942def2f402, server=3609ad07831c,39733,1734371789085 2024-12-16T17:58:02,131 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39733 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5e6b04d28ec6af2428036942def2f402, server=3609ad07831c,39733,1734371789085 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-16T17:58:02,131 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39733 {}] ipc.CallRunner(138): callId: 37 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:47436 deadline: 1734371942126, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5e6b04d28ec6af2428036942def2f402, server=3609ad07831c,39733,1734371789085 2024-12-16T17:58:02,131 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39733 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5e6b04d28ec6af2428036942def2f402, server=3609ad07831c,39733,1734371789085 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-16T17:58:02,131 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39733 {}] ipc.CallRunner(138): callId: 45 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:47356 deadline: 1734371942126, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5e6b04d28ec6af2428036942def2f402, server=3609ad07831c,39733,1734371789085 2024-12-16T17:58:02,232 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39733 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5e6b04d28ec6af2428036942def2f402, server=3609ad07831c,39733,1734371789085 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-16T17:58:02,232 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39733 {}] ipc.CallRunner(138): callId: 41 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:47374 deadline: 1734371942230, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5e6b04d28ec6af2428036942def2f402, server=3609ad07831c,39733,1734371789085 2024-12-16T17:58:02,232 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39733 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5e6b04d28ec6af2428036942def2f402, server=3609ad07831c,39733,1734371789085 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-16T17:58:02,232 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39733 {}] ipc.CallRunner(138): callId: 37 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:47422 deadline: 1734371942230, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5e6b04d28ec6af2428036942def2f402, server=3609ad07831c,39733,1734371789085 2024-12-16T17:58:02,233 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39733 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5e6b04d28ec6af2428036942def2f402, server=3609ad07831c,39733,1734371789085 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-16T17:58:02,233 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39733 {}] ipc.CallRunner(138): callId: 39 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:47372 deadline: 1734371942231, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5e6b04d28ec6af2428036942def2f402, server=3609ad07831c,39733,1734371789085 2024-12-16T17:58:02,233 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39733 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5e6b04d28ec6af2428036942def2f402, server=3609ad07831c,39733,1734371789085 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-16T17:58:02,234 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39733 {}] ipc.CallRunner(138): callId: 39 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:47436 deadline: 1734371942232, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5e6b04d28ec6af2428036942def2f402, server=3609ad07831c,39733,1734371789085 2024-12-16T17:58:02,234 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39733 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5e6b04d28ec6af2428036942def2f402, server=3609ad07831c,39733,1734371789085 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-16T17:58:02,234 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39733 {}] ipc.CallRunner(138): callId: 47 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:47356 deadline: 1734371942232, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5e6b04d28ec6af2428036942def2f402, server=3609ad07831c,39733,1734371789085 2024-12-16T17:58:02,253 INFO [RS:0;3609ad07831c:39733-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 5e6b04d28ec6af2428036942def2f402#A#compaction#319 average throughput is 0.06 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-12-16T17:58:02,253 DEBUG [RS:0;3609ad07831c:39733-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/5e6b04d28ec6af2428036942def2f402/.tmp/A/8a504e4f410d4bdfb5b22a41788b9749 is 175, key is test_row_0/A:col10/1734371880282/Put/seqid=0 2024-12-16T17:58:02,256 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41817 is added to blk_1073742199_1375 (size=31058) 2024-12-16T17:58:02,263 DEBUG [RS:0;3609ad07831c:39733-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/5e6b04d28ec6af2428036942def2f402/.tmp/A/8a504e4f410d4bdfb5b22a41788b9749 as hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/5e6b04d28ec6af2428036942def2f402/A/8a504e4f410d4bdfb5b22a41788b9749 2024-12-16T17:58:02,267 INFO [RS:0;3609ad07831c:39733-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 5e6b04d28ec6af2428036942def2f402/A of 5e6b04d28ec6af2428036942def2f402 into 8a504e4f410d4bdfb5b22a41788b9749(size=30.3 K), total size for store is 30.3 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-12-16T17:58:02,267 DEBUG [RS:0;3609ad07831c:39733-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 5e6b04d28ec6af2428036942def2f402: 2024-12-16T17:58:02,267 INFO [RS:0;3609ad07831c:39733-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1734371876313.5e6b04d28ec6af2428036942def2f402., storeName=5e6b04d28ec6af2428036942def2f402/A, priority=13, startTime=1734371881825; duration=0sec 2024-12-16T17:58:02,267 DEBUG [RS:0;3609ad07831c:39733-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-16T17:58:02,267 DEBUG [RS:0;3609ad07831c:39733-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 5e6b04d28ec6af2428036942def2f402:A 2024-12-16T17:58:02,287 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-1 {event_type=RS_FLUSH_REGIONS, pid=99}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-16T17:58:02,290 INFO [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-1 {event_type=RS_FLUSH_REGIONS, pid=99}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241216efcbed4337fd488c9ca7a6af6fb5efef_5e6b04d28ec6af2428036942def2f402 to hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241216efcbed4337fd488c9ca7a6af6fb5efef_5e6b04d28ec6af2428036942def2f402 2024-12-16T17:58:02,291 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-1 {event_type=RS_FLUSH_REGIONS, pid=99}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/5e6b04d28ec6af2428036942def2f402/.tmp/A/fc427cf7f423422c88079bf6f96747aa, store: [table=TestAcidGuarantees family=A region=5e6b04d28ec6af2428036942def2f402] 2024-12-16T17:58:02,291 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-1 {event_type=RS_FLUSH_REGIONS, pid=99}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/5e6b04d28ec6af2428036942def2f402/.tmp/A/fc427cf7f423422c88079bf6f96747aa is 175, key is test_row_0/A:col10/1734371880930/Put/seqid=0 2024-12-16T17:58:02,295 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41817 is added to blk_1073742200_1376 (size=30955) 2024-12-16T17:58:02,311 DEBUG [RS:0;3609ad07831c:39733-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/5e6b04d28ec6af2428036942def2f402/.tmp/C/ac3ef196633e405c95ae4d1518ac1940 as hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/5e6b04d28ec6af2428036942def2f402/C/ac3ef196633e405c95ae4d1518ac1940 2024-12-16T17:58:02,315 INFO [RS:0;3609ad07831c:39733-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 5e6b04d28ec6af2428036942def2f402/C of 
5e6b04d28ec6af2428036942def2f402 into ac3ef196633e405c95ae4d1518ac1940(size=11.8 K), total size for store is 11.8 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-12-16T17:58:02,315 DEBUG [RS:0;3609ad07831c:39733-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 5e6b04d28ec6af2428036942def2f402: 2024-12-16T17:58:02,315 INFO [RS:0;3609ad07831c:39733-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1734371876313.5e6b04d28ec6af2428036942def2f402., storeName=5e6b04d28ec6af2428036942def2f402/C, priority=13, startTime=1734371881826; duration=0sec 2024-12-16T17:58:02,315 DEBUG [RS:0;3609ad07831c:39733-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-16T17:58:02,315 DEBUG [RS:0;3609ad07831c:39733-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 5e6b04d28ec6af2428036942def2f402:C 2024-12-16T17:58:02,359 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38367 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=98 2024-12-16T17:58:02,435 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39733 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5e6b04d28ec6af2428036942def2f402, server=3609ad07831c,39733,1734371789085 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-16T17:58:02,436 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39733 {}] ipc.CallRunner(138): callId: 43 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:47374 deadline: 1734371942433, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5e6b04d28ec6af2428036942def2f402, server=3609ad07831c,39733,1734371789085 2024-12-16T17:58:02,439 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39733 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5e6b04d28ec6af2428036942def2f402, server=3609ad07831c,39733,1734371789085 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-16T17:58:02,439 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39733 {}] ipc.CallRunner(138): callId: 41 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:47436 deadline: 1734371942434, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5e6b04d28ec6af2428036942def2f402, server=3609ad07831c,39733,1734371789085 2024-12-16T17:58:02,439 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39733 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5e6b04d28ec6af2428036942def2f402, server=3609ad07831c,39733,1734371789085 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-16T17:58:02,439 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39733 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5e6b04d28ec6af2428036942def2f402, server=3609ad07831c,39733,1734371789085 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-16T17:58:02,439 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39733 {}] ipc.CallRunner(138): callId: 41 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:47372 deadline: 1734371942434, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5e6b04d28ec6af2428036942def2f402, server=3609ad07831c,39733,1734371789085 2024-12-16T17:58:02,439 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39733 {}] ipc.CallRunner(138): callId: 39 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:47422 deadline: 1734371942434, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5e6b04d28ec6af2428036942def2f402, server=3609ad07831c,39733,1734371789085 2024-12-16T17:58:02,439 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39733 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5e6b04d28ec6af2428036942def2f402, server=3609ad07831c,39733,1734371789085 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-16T17:58:02,439 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39733 {}] ipc.CallRunner(138): callId: 49 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:47356 deadline: 1734371942434, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5e6b04d28ec6af2428036942def2f402, server=3609ad07831c,39733,1734371789085 2024-12-16T17:58:02,695 INFO [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-1 {event_type=RS_FLUSH_REGIONS, pid=99}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=78, memsize=40.3 K, hasBloomFilter=true, into tmp file hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/5e6b04d28ec6af2428036942def2f402/.tmp/A/fc427cf7f423422c88079bf6f96747aa 2024-12-16T17:58:02,701 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-1 {event_type=RS_FLUSH_REGIONS, pid=99}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/5e6b04d28ec6af2428036942def2f402/.tmp/B/71ffb2b20f344c458b9bc7222ab20c63 is 50, key is test_row_0/B:col10/1734371880930/Put/seqid=0 2024-12-16T17:58:02,712 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41817 is added to blk_1073742201_1377 (size=12001) 2024-12-16T17:58:02,741 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39733 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5e6b04d28ec6af2428036942def2f402, server=3609ad07831c,39733,1734371789085 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-16T17:58:02,741 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39733 {}] ipc.CallRunner(138): callId: 45 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:47374 deadline: 1734371942738, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5e6b04d28ec6af2428036942def2f402, server=3609ad07831c,39733,1734371789085 2024-12-16T17:58:02,741 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39733 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5e6b04d28ec6af2428036942def2f402, server=3609ad07831c,39733,1734371789085 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-16T17:58:02,741 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39733 {}] ipc.CallRunner(138): callId: 43 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:47436 deadline: 1734371942739, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5e6b04d28ec6af2428036942def2f402, server=3609ad07831c,39733,1734371789085 2024-12-16T17:58:02,742 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39733 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5e6b04d28ec6af2428036942def2f402, server=3609ad07831c,39733,1734371789085 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-16T17:58:02,742 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39733 {}] ipc.CallRunner(138): callId: 43 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:47372 deadline: 1734371942741, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5e6b04d28ec6af2428036942def2f402, server=3609ad07831c,39733,1734371789085 2024-12-16T17:58:02,742 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39733 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5e6b04d28ec6af2428036942def2f402, server=3609ad07831c,39733,1734371789085 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-16T17:58:02,742 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39733 {}] ipc.CallRunner(138): callId: 41 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:47422 deadline: 1734371942741, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5e6b04d28ec6af2428036942def2f402, server=3609ad07831c,39733,1734371789085 2024-12-16T17:58:02,745 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39733 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5e6b04d28ec6af2428036942def2f402, server=3609ad07831c,39733,1734371789085 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-16T17:58:02,745 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39733 {}] ipc.CallRunner(138): callId: 51 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:47356 deadline: 1734371942742, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5e6b04d28ec6af2428036942def2f402, server=3609ad07831c,39733,1734371789085 2024-12-16T17:58:03,112 INFO [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-1 {event_type=RS_FLUSH_REGIONS, pid=99}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=40.25 KB at sequenceid=78 (bloomFilter=true), to=hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/5e6b04d28ec6af2428036942def2f402/.tmp/B/71ffb2b20f344c458b9bc7222ab20c63 2024-12-16T17:58:03,120 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-1 {event_type=RS_FLUSH_REGIONS, pid=99}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/5e6b04d28ec6af2428036942def2f402/.tmp/C/095f0fffe8c643669295993ff2d2940d is 50, key is test_row_0/C:col10/1734371880930/Put/seqid=0 2024-12-16T17:58:03,123 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41817 is added to blk_1073742202_1378 (size=12001) 2024-12-16T17:58:03,245 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39733 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5e6b04d28ec6af2428036942def2f402, server=3609ad07831c,39733,1734371789085 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-16T17:58:03,246 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39733 {}] ipc.CallRunner(138): callId: 43 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:47422 deadline: 1734371943243, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5e6b04d28ec6af2428036942def2f402, server=3609ad07831c,39733,1734371789085 2024-12-16T17:58:03,246 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39733 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5e6b04d28ec6af2428036942def2f402, server=3609ad07831c,39733,1734371789085 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-16T17:58:03,246 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39733 {}] ipc.CallRunner(138): callId: 45 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:47372 deadline: 1734371943243, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5e6b04d28ec6af2428036942def2f402, server=3609ad07831c,39733,1734371789085 2024-12-16T17:58:03,246 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39733 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5e6b04d28ec6af2428036942def2f402, server=3609ad07831c,39733,1734371789085 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-16T17:58:03,246 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39733 {}] ipc.CallRunner(138): callId: 47 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:47374 deadline: 1734371943245, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5e6b04d28ec6af2428036942def2f402, server=3609ad07831c,39733,1734371789085 2024-12-16T17:58:03,246 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39733 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5e6b04d28ec6af2428036942def2f402, server=3609ad07831c,39733,1734371789085 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-16T17:58:03,246 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39733 {}] ipc.CallRunner(138): callId: 53 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:47356 deadline: 1734371943245, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5e6b04d28ec6af2428036942def2f402, server=3609ad07831c,39733,1734371789085 2024-12-16T17:58:03,249 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39733 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5e6b04d28ec6af2428036942def2f402, server=3609ad07831c,39733,1734371789085 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-16T17:58:03,249 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39733 {}] ipc.CallRunner(138): callId: 45 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:47436 deadline: 1734371943246, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5e6b04d28ec6af2428036942def2f402, server=3609ad07831c,39733,1734371789085 2024-12-16T17:58:03,360 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38367 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=98 2024-12-16T17:58:03,524 INFO [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-1 {event_type=RS_FLUSH_REGIONS, pid=99}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=40.25 KB at sequenceid=78 (bloomFilter=true), to=hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/5e6b04d28ec6af2428036942def2f402/.tmp/C/095f0fffe8c643669295993ff2d2940d 2024-12-16T17:58:03,528 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-1 {event_type=RS_FLUSH_REGIONS, pid=99}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/5e6b04d28ec6af2428036942def2f402/.tmp/A/fc427cf7f423422c88079bf6f96747aa as hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/5e6b04d28ec6af2428036942def2f402/A/fc427cf7f423422c88079bf6f96747aa 2024-12-16T17:58:03,531 INFO [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-1 {event_type=RS_FLUSH_REGIONS, pid=99}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/5e6b04d28ec6af2428036942def2f402/A/fc427cf7f423422c88079bf6f96747aa, entries=150, sequenceid=78, filesize=30.2 K 2024-12-16T17:58:03,531 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-1 {event_type=RS_FLUSH_REGIONS, pid=99}] regionserver.HRegionFileSystem(442): Committing 
hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/5e6b04d28ec6af2428036942def2f402/.tmp/B/71ffb2b20f344c458b9bc7222ab20c63 as hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/5e6b04d28ec6af2428036942def2f402/B/71ffb2b20f344c458b9bc7222ab20c63 2024-12-16T17:58:03,535 INFO [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-1 {event_type=RS_FLUSH_REGIONS, pid=99}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/5e6b04d28ec6af2428036942def2f402/B/71ffb2b20f344c458b9bc7222ab20c63, entries=150, sequenceid=78, filesize=11.7 K 2024-12-16T17:58:03,535 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-1 {event_type=RS_FLUSH_REGIONS, pid=99}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/5e6b04d28ec6af2428036942def2f402/.tmp/C/095f0fffe8c643669295993ff2d2940d as hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/5e6b04d28ec6af2428036942def2f402/C/095f0fffe8c643669295993ff2d2940d 2024-12-16T17:58:03,539 INFO [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-1 {event_type=RS_FLUSH_REGIONS, pid=99}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/5e6b04d28ec6af2428036942def2f402/C/095f0fffe8c643669295993ff2d2940d, entries=150, sequenceid=78, filesize=11.7 K 2024-12-16T17:58:03,539 INFO [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-1 {event_type=RS_FLUSH_REGIONS, pid=99}] regionserver.HRegion(3040): Finished flush of dataSize ~120.76 KB/123660, heapSize ~317.11 KB/324720, currentSize=80.51 KB/82440 for 5e6b04d28ec6af2428036942def2f402 in 1674ms, sequenceid=78, compaction requested=false 2024-12-16T17:58:03,539 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-1 {event_type=RS_FLUSH_REGIONS, pid=99}] regionserver.HRegion(2538): Flush status journal for 5e6b04d28ec6af2428036942def2f402: 2024-12-16T17:58:03,539 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-1 {event_type=RS_FLUSH_REGIONS, pid=99}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1734371876313.5e6b04d28ec6af2428036942def2f402. 
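The repeated RegionTooBusyException entries in this run are raised by HRegion.checkResources() once the region's memstore passes its blocking threshold, which HBase derives from hbase.hregion.memstore.flush.size multiplied by hbase.hregion.memstore.block.multiplier; the "Over memstore limit=512.0 K" reported here implies a deliberately small flush size in the test setup, although the exact values are not visible in this excerpt. A minimal sketch of how such a threshold could be configured (the 128 KB flush size and 4x multiplier below are assumptions chosen so the product matches the 512 K limit seen in the log, not values taken from the test itself):

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;

public class SmallMemstoreConfig {
  public static Configuration create() {
    Configuration conf = HBaseConfiguration.create();
    // Assumed value for illustration: flush memstores once they reach 128 KB.
    conf.setLong("hbase.hregion.memstore.flush.size", 128 * 1024L);
    // Updates are blocked once the memstore exceeds flush.size * multiplier
    // (128 KB * 4 = 512 KB, matching the "Over memstore limit=512.0 K" above).
    conf.setInt("hbase.hregion.memstore.block.multiplier", 4);
    return conf;
  }
}

With settings like these, writers that outrun the flusher receive RegionTooBusyException until an in-flight flush (such as the one completing above at sequenceid=78) releases memstore space.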
2024-12-16T17:58:03,539 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-1 {event_type=RS_FLUSH_REGIONS, pid=99}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=99 2024-12-16T17:58:03,540 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38367 {}] master.HMaster(4106): Remote procedure done, pid=99 2024-12-16T17:58:03,541 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=99, resume processing ppid=98 2024-12-16T17:58:03,541 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=99, ppid=98, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 2.2840 sec 2024-12-16T17:58:03,542 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=98, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=98, table=TestAcidGuarantees in 2.2890 sec 2024-12-16T17:58:04,251 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39733 {}] regionserver.HRegion(8581): Flush requested on 5e6b04d28ec6af2428036942def2f402 2024-12-16T17:58:04,251 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 5e6b04d28ec6af2428036942def2f402 3/3 column families, dataSize=87.22 KB heapSize=229.27 KB 2024-12-16T17:58:04,251 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 5e6b04d28ec6af2428036942def2f402, store=A 2024-12-16T17:58:04,251 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-16T17:58:04,252 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 5e6b04d28ec6af2428036942def2f402, store=B 2024-12-16T17:58:04,252 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-16T17:58:04,252 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 5e6b04d28ec6af2428036942def2f402, store=C 2024-12-16T17:58:04,252 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-16T17:58:04,257 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241216d79fc3e569b74cd88adc6775b9bcb4be_5e6b04d28ec6af2428036942def2f402 is 50, key is test_row_0/A:col10/1734371882125/Put/seqid=0 2024-12-16T17:58:04,261 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41817 is added to blk_1073742203_1379 (size=12154) 2024-12-16T17:58:04,276 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39733 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5e6b04d28ec6af2428036942def2f402, server=3609ad07831c,39733,1734371789085 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-16T17:58:04,276 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39733 {}] ipc.CallRunner(138): callId: 51 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:47372 deadline: 1734371944271, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5e6b04d28ec6af2428036942def2f402, server=3609ad07831c,39733,1734371789085 2024-12-16T17:58:04,277 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39733 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5e6b04d28ec6af2428036942def2f402, server=3609ad07831c,39733,1734371789085 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-16T17:58:04,277 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39733 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5e6b04d28ec6af2428036942def2f402, server=3609ad07831c,39733,1734371789085 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-16T17:58:04,277 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39733 {}] ipc.CallRunner(138): callId: 48 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:47422 deadline: 1734371944272, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5e6b04d28ec6af2428036942def2f402, server=3609ad07831c,39733,1734371789085 2024-12-16T17:58:04,277 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39733 {}] ipc.CallRunner(138): callId: 59 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:47356 deadline: 1734371944272, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5e6b04d28ec6af2428036942def2f402, server=3609ad07831c,39733,1734371789085 2024-12-16T17:58:04,283 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39733 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5e6b04d28ec6af2428036942def2f402, server=3609ad07831c,39733,1734371789085 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-16T17:58:04,284 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39733 {}] ipc.CallRunner(138): callId: 53 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:47374 deadline: 1734371944276, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5e6b04d28ec6af2428036942def2f402, server=3609ad07831c,39733,1734371789085 2024-12-16T17:58:04,284 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39733 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5e6b04d28ec6af2428036942def2f402, server=3609ad07831c,39733,1734371789085 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-16T17:58:04,284 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39733 {}] ipc.CallRunner(138): callId: 51 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:47436 deadline: 1734371944277, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5e6b04d28ec6af2428036942def2f402, server=3609ad07831c,39733,1734371789085 2024-12-16T17:58:04,383 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39733 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5e6b04d28ec6af2428036942def2f402, server=3609ad07831c,39733,1734371789085 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-16T17:58:04,383 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39733 {}] ipc.CallRunner(138): callId: 53 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:47372 deadline: 1734371944377, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5e6b04d28ec6af2428036942def2f402, server=3609ad07831c,39733,1734371789085 2024-12-16T17:58:04,383 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39733 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5e6b04d28ec6af2428036942def2f402, server=3609ad07831c,39733,1734371789085 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-16T17:58:04,384 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39733 {}] ipc.CallRunner(138): callId: 61 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:47356 deadline: 1734371944377, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5e6b04d28ec6af2428036942def2f402, server=3609ad07831c,39733,1734371789085 2024-12-16T17:58:04,384 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39733 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5e6b04d28ec6af2428036942def2f402, server=3609ad07831c,39733,1734371789085 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-16T17:58:04,384 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39733 {}] ipc.CallRunner(138): callId: 50 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:47422 deadline: 1734371944377, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5e6b04d28ec6af2428036942def2f402, server=3609ad07831c,39733,1734371789085 2024-12-16T17:58:04,387 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39733 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5e6b04d28ec6af2428036942def2f402, server=3609ad07831c,39733,1734371789085 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-16T17:58:04,387 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39733 {}] ipc.CallRunner(138): callId: 55 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:47374 deadline: 1734371944384, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5e6b04d28ec6af2428036942def2f402, server=3609ad07831c,39733,1734371789085 2024-12-16T17:58:04,390 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39733 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5e6b04d28ec6af2428036942def2f402, server=3609ad07831c,39733,1734371789085 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-16T17:58:04,390 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39733 {}] ipc.CallRunner(138): callId: 53 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:47436 deadline: 1734371944384, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5e6b04d28ec6af2428036942def2f402, server=3609ad07831c,39733,1734371789085 2024-12-16T17:58:04,588 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39733 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5e6b04d28ec6af2428036942def2f402, server=3609ad07831c,39733,1734371789085 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-16T17:58:04,589 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39733 {}] ipc.CallRunner(138): callId: 55 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:47372 deadline: 1734371944584, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5e6b04d28ec6af2428036942def2f402, server=3609ad07831c,39733,1734371789085 2024-12-16T17:58:04,589 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39733 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5e6b04d28ec6af2428036942def2f402, server=3609ad07831c,39733,1734371789085 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-16T17:58:04,589 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39733 {}] ipc.CallRunner(138): callId: 52 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:47422 deadline: 1734371944584, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5e6b04d28ec6af2428036942def2f402, server=3609ad07831c,39733,1734371789085 2024-12-16T17:58:04,589 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39733 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5e6b04d28ec6af2428036942def2f402, server=3609ad07831c,39733,1734371789085 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-16T17:58:04,589 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39733 {}] ipc.CallRunner(138): callId: 63 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:47356 deadline: 1734371944585, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5e6b04d28ec6af2428036942def2f402, server=3609ad07831c,39733,1734371789085 2024-12-16T17:58:04,593 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39733 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5e6b04d28ec6af2428036942def2f402, server=3609ad07831c,39733,1734371789085 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-16T17:58:04,593 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39733 {}] ipc.CallRunner(138): callId: 57 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:47374 deadline: 1734371944588, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5e6b04d28ec6af2428036942def2f402, server=3609ad07831c,39733,1734371789085 2024-12-16T17:58:04,596 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39733 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5e6b04d28ec6af2428036942def2f402, server=3609ad07831c,39733,1734371789085 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-16T17:58:04,596 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39733 {}] ipc.CallRunner(138): callId: 55 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:47436 deadline: 1734371944592, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5e6b04d28ec6af2428036942def2f402, server=3609ad07831c,39733,1734371789085 2024-12-16T17:58:04,662 DEBUG [MemStoreFlusher.0 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-16T17:58:04,666 INFO [MemStoreFlusher.0 {}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241216d79fc3e569b74cd88adc6775b9bcb4be_5e6b04d28ec6af2428036942def2f402 to hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241216d79fc3e569b74cd88adc6775b9bcb4be_5e6b04d28ec6af2428036942def2f402 2024-12-16T17:58:04,667 DEBUG [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/5e6b04d28ec6af2428036942def2f402/.tmp/A/57a5450f19e440f7aa07d7722a61620d, store: [table=TestAcidGuarantees family=A region=5e6b04d28ec6af2428036942def2f402] 2024-12-16T17:58:04,667 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/5e6b04d28ec6af2428036942def2f402/.tmp/A/57a5450f19e440f7aa07d7722a61620d is 175, key is test_row_0/A:col10/1734371882125/Put/seqid=0 2024-12-16T17:58:04,670 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41817 is added to blk_1073742204_1380 (size=30955) 2024-12-16T17:58:04,892 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39733 {}] regionserver.HRegion(5069): Region is too 
busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5e6b04d28ec6af2428036942def2f402, server=3609ad07831c,39733,1734371789085 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-16T17:58:04,893 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39733 {}] ipc.CallRunner(138): callId: 65 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:47356 deadline: 1734371944891, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5e6b04d28ec6af2428036942def2f402, server=3609ad07831c,39733,1734371789085 2024-12-16T17:58:04,895 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39733 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5e6b04d28ec6af2428036942def2f402, server=3609ad07831c,39733,1734371789085 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-16T17:58:04,895 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39733 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5e6b04d28ec6af2428036942def2f402, server=3609ad07831c,39733,1734371789085 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-16T17:58:04,895 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39733 {}] ipc.CallRunner(138): callId: 57 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:47372 deadline: 1734371944892, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5e6b04d28ec6af2428036942def2f402, server=3609ad07831c,39733,1734371789085 2024-12-16T17:58:04,895 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39733 {}] ipc.CallRunner(138): callId: 54 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:47422 deadline: 1734371944892, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5e6b04d28ec6af2428036942def2f402, server=3609ad07831c,39733,1734371789085 2024-12-16T17:58:04,896 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39733 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5e6b04d28ec6af2428036942def2f402, server=3609ad07831c,39733,1734371789085 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-16T17:58:04,897 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39733 {}] ipc.CallRunner(138): callId: 59 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:47374 deadline: 1734371944895, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5e6b04d28ec6af2428036942def2f402, server=3609ad07831c,39733,1734371789085 2024-12-16T17:58:04,902 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39733 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5e6b04d28ec6af2428036942def2f402, server=3609ad07831c,39733,1734371789085 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-16T17:58:04,902 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39733 {}] ipc.CallRunner(138): callId: 57 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:47436 deadline: 1734371944899, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5e6b04d28ec6af2428036942def2f402, server=3609ad07831c,39733,1734371789085 2024-12-16T17:58:05,071 INFO [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=96, memsize=29.1 K, hasBloomFilter=true, into tmp file hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/5e6b04d28ec6af2428036942def2f402/.tmp/A/57a5450f19e440f7aa07d7722a61620d 2024-12-16T17:58:05,079 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/5e6b04d28ec6af2428036942def2f402/.tmp/B/056a3964759f478f85f04a0919436ac4 is 50, key is test_row_0/B:col10/1734371882125/Put/seqid=0 2024-12-16T17:58:05,090 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41817 is added to blk_1073742205_1381 (size=12001) 2024-12-16T17:58:05,361 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38367 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=98 2024-12-16T17:58:05,361 INFO [Thread-1619 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 98 completed 2024-12-16T17:58:05,362 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38367 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-12-16T17:58:05,363 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38367 {}] procedure2.ProcedureExecutor(1098): Stored pid=100, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=100, table=TestAcidGuarantees 2024-12-16T17:58:05,363 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38367 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=100 2024-12-16T17:58:05,363 INFO [PEWorker-2 {}] procedure.FlushTableProcedure(91): pid=100, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=100, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-12-16T17:58:05,364 INFO [PEWorker-2 {}] procedure.FlushTableProcedure(91): pid=100, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=100, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-12-16T17:58:05,364 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=101, ppid=100, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 
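[Editor's note] The Mutate calls rejected above fail with RegionTooBusyException because the region's memstore has reached its blocking limit while a flush is still in progress. The stock HBase client already treats this as a transient condition and retries internally; the sketch below only makes that retry behaviour explicit for a single put against the TestAcidGuarantees table. The row key, family, qualifier, value, and backoff numbers are assumptions for illustration, not values taken from this run.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.RegionTooBusyException;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.util.Bytes;

public class BusyRegionPutSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    try (Connection conn = ConnectionFactory.createConnection(conf);
         Table table = conn.getTable(TableName.valueOf("TestAcidGuarantees"))) {
      Put put = new Put(Bytes.toBytes("test_row_0"));
      // Hypothetical cell; the test writes families A/B/C with qualifiers like col10.
      put.addColumn(Bytes.toBytes("A"), Bytes.toBytes("col10"), Bytes.toBytes("value"));
      long backoffMs = 100; // assumed starting backoff
      for (int attempt = 1; attempt <= 5; attempt++) {
        try {
          table.put(put); // the client normally retries busy regions on its own before giving up
          break;
        } catch (RegionTooBusyException e) {
          // Memstore is above its blocking limit; back off so the flush can catch up, then retry.
          Thread.sleep(backoffMs);
          backoffMs *= 2;
        }
      }
    }
  }
}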
2024-12-16T17:58:05,401 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39733 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5e6b04d28ec6af2428036942def2f402, server=3609ad07831c,39733,1734371789085 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-16T17:58:05,401 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39733 {}] ipc.CallRunner(138): callId: 59 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:47372 deadline: 1734371945395, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5e6b04d28ec6af2428036942def2f402, server=3609ad07831c,39733,1734371789085 2024-12-16T17:58:05,401 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39733 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5e6b04d28ec6af2428036942def2f402, server=3609ad07831c,39733,1734371789085 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-16T17:58:05,401 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39733 {}] ipc.CallRunner(138): callId: 56 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:47422 deadline: 1734371945395, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5e6b04d28ec6af2428036942def2f402, server=3609ad07831c,39733,1734371789085 2024-12-16T17:58:05,401 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39733 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5e6b04d28ec6af2428036942def2f402, server=3609ad07831c,39733,1734371789085 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-16T17:58:05,401 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39733 {}] ipc.CallRunner(138): callId: 67 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:47356 deadline: 1734371945396, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5e6b04d28ec6af2428036942def2f402, server=3609ad07831c,39733,1734371789085 2024-12-16T17:58:05,402 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39733 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5e6b04d28ec6af2428036942def2f402, server=3609ad07831c,39733,1734371789085 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-16T17:58:05,402 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39733 {}] ipc.CallRunner(138): callId: 61 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:47374 deadline: 1734371945399, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5e6b04d28ec6af2428036942def2f402, server=3609ad07831c,39733,1734371789085 2024-12-16T17:58:05,408 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39733 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5e6b04d28ec6af2428036942def2f402, server=3609ad07831c,39733,1734371789085 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-16T17:58:05,409 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39733 {}] ipc.CallRunner(138): callId: 59 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:47436 deadline: 1734371945402, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5e6b04d28ec6af2428036942def2f402, server=3609ad07831c,39733,1734371789085 2024-12-16T17:58:05,464 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38367 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=100 2024-12-16T17:58:05,491 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=29.07 KB at sequenceid=96 (bloomFilter=true), to=hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/5e6b04d28ec6af2428036942def2f402/.tmp/B/056a3964759f478f85f04a0919436ac4 2024-12-16T17:58:05,497 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/5e6b04d28ec6af2428036942def2f402/.tmp/C/486e5acf997541219ae6c4984ca3607b is 50, key is test_row_0/C:col10/1734371882125/Put/seqid=0 2024-12-16T17:58:05,500 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41817 is added to blk_1073742206_1382 (size=12001) 2024-12-16T17:58:05,515 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 3609ad07831c,39733,1734371789085 2024-12-16T17:58:05,515 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=39733 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=101 2024-12-16T17:58:05,515 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-2 {event_type=RS_FLUSH_REGIONS, pid=101}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1734371876313.5e6b04d28ec6af2428036942def2f402. 2024-12-16T17:58:05,516 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-2 {event_type=RS_FLUSH_REGIONS, pid=101}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1734371876313.5e6b04d28ec6af2428036942def2f402. as already flushing 2024-12-16T17:58:05,516 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-2 {event_type=RS_FLUSH_REGIONS, pid=101}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1734371876313.5e6b04d28ec6af2428036942def2f402. 
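[Editor's note] The 512.0 K figure in these exceptions is the region's blocking memstore size, which HBase derives as hbase.hregion.memstore.flush.size multiplied by hbase.hregion.memstore.block.multiplier; a limit this small is typical of a deliberately throttled test configuration. The sketch below only illustrates how such a limit could be configured; the specific numbers are an assumption chosen to reproduce a 512 K blocking limit, not values read from this run.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;

public class MemstoreBlockingLimitSketch {
  public static void main(String[] args) {
    Configuration conf = HBaseConfiguration.create();
    // Flush each memstore once it reaches 128 KB (assumed value for illustration).
    conf.setLong("hbase.hregion.memstore.flush.size", 128 * 1024);
    // Block new writes once the memstore passes flush.size * multiplier = 512 KB,
    // the point at which HRegion.checkResources throws RegionTooBusyException.
    conf.setInt("hbase.hregion.memstore.block.multiplier", 4);
    long blockingLimit = conf.getLong("hbase.hregion.memstore.flush.size", 0)
        * conf.getInt("hbase.hregion.memstore.block.multiplier", 1);
    System.out.println("blocking limit = " + blockingLimit + " bytes");
  }
}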
2024-12-16T17:58:05,516 ERROR [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-2 {event_type=RS_FLUSH_REGIONS, pid=101}] handler.RSProcedureHandler(58): pid=101 java.io.IOException: Unable to complete flush {ENCODED => 5e6b04d28ec6af2428036942def2f402, NAME => 'TestAcidGuarantees,,1734371876313.5e6b04d28ec6af2428036942def2f402.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-16T17:58:05,516 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-2 {event_type=RS_FLUSH_REGIONS, pid=101}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=101 java.io.IOException: Unable to complete flush {ENCODED => 5e6b04d28ec6af2428036942def2f402, NAME => 'TestAcidGuarantees,,1734371876313.5e6b04d28ec6af2428036942def2f402.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-16T17:58:05,516 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38367 {}] master.HMaster(4114): Remote procedure failed, pid=101 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 5e6b04d28ec6af2428036942def2f402, NAME => 'TestAcidGuarantees,,1734371876313.5e6b04d28ec6af2428036942def2f402.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 5e6b04d28ec6af2428036942def2f402, NAME => 'TestAcidGuarantees,,1734371876313.5e6b04d28ec6af2428036942def2f402.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-16T17:58:05,664 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38367 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=100 2024-12-16T17:58:05,667 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 3609ad07831c,39733,1734371789085 2024-12-16T17:58:05,668 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=39733 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=101 2024-12-16T17:58:05,668 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-0 {event_type=RS_FLUSH_REGIONS, pid=101}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1734371876313.5e6b04d28ec6af2428036942def2f402. 2024-12-16T17:58:05,668 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-0 {event_type=RS_FLUSH_REGIONS, pid=101}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1734371876313.5e6b04d28ec6af2428036942def2f402. as already flushing 2024-12-16T17:58:05,668 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-0 {event_type=RS_FLUSH_REGIONS, pid=101}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1734371876313.5e6b04d28ec6af2428036942def2f402. 2024-12-16T17:58:05,668 ERROR [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-0 {event_type=RS_FLUSH_REGIONS, pid=101}] handler.RSProcedureHandler(58): pid=101 java.io.IOException: Unable to complete flush {ENCODED => 5e6b04d28ec6af2428036942def2f402, NAME => 'TestAcidGuarantees,,1734371876313.5e6b04d28ec6af2428036942def2f402.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-16T17:58:05,668 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-0 {event_type=RS_FLUSH_REGIONS, pid=101}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=101 java.io.IOException: Unable to complete flush {ENCODED => 5e6b04d28ec6af2428036942def2f402, NAME => 'TestAcidGuarantees,,1734371876313.5e6b04d28ec6af2428036942def2f402.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-16T17:58:05,669 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38367 {}] master.HMaster(4114): Remote procedure failed, pid=101 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 5e6b04d28ec6af2428036942def2f402, NAME => 'TestAcidGuarantees,,1734371876313.5e6b04d28ec6af2428036942def2f402.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 5e6b04d28ec6af2428036942def2f402, NAME => 'TestAcidGuarantees,,1734371876313.5e6b04d28ec6af2428036942def2f402.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-16T17:58:05,820 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 3609ad07831c,39733,1734371789085 2024-12-16T17:58:05,820 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=39733 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=101 2024-12-16T17:58:05,820 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-1 {event_type=RS_FLUSH_REGIONS, pid=101}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1734371876313.5e6b04d28ec6af2428036942def2f402. 2024-12-16T17:58:05,820 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-1 {event_type=RS_FLUSH_REGIONS, pid=101}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1734371876313.5e6b04d28ec6af2428036942def2f402. as already flushing 2024-12-16T17:58:05,820 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-1 {event_type=RS_FLUSH_REGIONS, pid=101}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1734371876313.5e6b04d28ec6af2428036942def2f402. 2024-12-16T17:58:05,820 ERROR [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-1 {event_type=RS_FLUSH_REGIONS, pid=101}] handler.RSProcedureHandler(58): pid=101 java.io.IOException: Unable to complete flush {ENCODED => 5e6b04d28ec6af2428036942def2f402, NAME => 'TestAcidGuarantees,,1734371876313.5e6b04d28ec6af2428036942def2f402.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-16T17:58:05,820 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-1 {event_type=RS_FLUSH_REGIONS, pid=101}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=101 java.io.IOException: Unable to complete flush {ENCODED => 5e6b04d28ec6af2428036942def2f402, NAME => 'TestAcidGuarantees,,1734371876313.5e6b04d28ec6af2428036942def2f402.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-16T17:58:05,821 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38367 {}] master.HMaster(4114): Remote procedure failed, pid=101 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 5e6b04d28ec6af2428036942def2f402, NAME => 'TestAcidGuarantees,,1734371876313.5e6b04d28ec6af2428036942def2f402.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 5e6b04d28ec6af2428036942def2f402, NAME => 'TestAcidGuarantees,,1734371876313.5e6b04d28ec6af2428036942def2f402.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-16T17:58:05,901 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=29.07 KB at sequenceid=96 (bloomFilter=true), to=hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/5e6b04d28ec6af2428036942def2f402/.tmp/C/486e5acf997541219ae6c4984ca3607b 2024-12-16T17:58:05,904 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/5e6b04d28ec6af2428036942def2f402/.tmp/A/57a5450f19e440f7aa07d7722a61620d as hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/5e6b04d28ec6af2428036942def2f402/A/57a5450f19e440f7aa07d7722a61620d 2024-12-16T17:58:05,907 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/5e6b04d28ec6af2428036942def2f402/A/57a5450f19e440f7aa07d7722a61620d, entries=150, sequenceid=96, filesize=30.2 K 2024-12-16T17:58:05,908 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/5e6b04d28ec6af2428036942def2f402/.tmp/B/056a3964759f478f85f04a0919436ac4 as hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/5e6b04d28ec6af2428036942def2f402/B/056a3964759f478f85f04a0919436ac4 2024-12-16T17:58:05,911 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/5e6b04d28ec6af2428036942def2f402/B/056a3964759f478f85f04a0919436ac4, entries=150, sequenceid=96, 
filesize=11.7 K 2024-12-16T17:58:05,911 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/5e6b04d28ec6af2428036942def2f402/.tmp/C/486e5acf997541219ae6c4984ca3607b as hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/5e6b04d28ec6af2428036942def2f402/C/486e5acf997541219ae6c4984ca3607b 2024-12-16T17:58:05,914 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/5e6b04d28ec6af2428036942def2f402/C/486e5acf997541219ae6c4984ca3607b, entries=150, sequenceid=96, filesize=11.7 K 2024-12-16T17:58:05,915 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~87.22 KB/89310, heapSize ~229.22 KB/234720, currentSize=120.76 KB/123660 for 5e6b04d28ec6af2428036942def2f402 in 1664ms, sequenceid=96, compaction requested=true 2024-12-16T17:58:05,915 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 5e6b04d28ec6af2428036942def2f402: 2024-12-16T17:58:05,915 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 5e6b04d28ec6af2428036942def2f402:A, priority=-2147483648, current under compaction store size is 1 2024-12-16T17:58:05,915 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-16T17:58:05,915 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 5e6b04d28ec6af2428036942def2f402:B, priority=-2147483648, current under compaction store size is 2 2024-12-16T17:58:05,915 DEBUG [RS:0;3609ad07831c:39733-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-16T17:58:05,915 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-16T17:58:05,915 DEBUG [RS:0;3609ad07831c:39733-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-16T17:58:05,915 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 5e6b04d28ec6af2428036942def2f402:C, priority=-2147483648, current under compaction store size is 3 2024-12-16T17:58:05,915 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-16T17:58:05,916 DEBUG [RS:0;3609ad07831c:39733-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 36106 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-16T17:58:05,916 DEBUG [RS:0;3609ad07831c:39733-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 92968 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-16T17:58:05,916 DEBUG [RS:0;3609ad07831c:39733-longCompactions-0 {}] regionserver.HStore(1540): 
5e6b04d28ec6af2428036942def2f402/B is initiating minor compaction (all files) 2024-12-16T17:58:05,916 DEBUG [RS:0;3609ad07831c:39733-shortCompactions-0 {}] regionserver.HStore(1540): 5e6b04d28ec6af2428036942def2f402/A is initiating minor compaction (all files) 2024-12-16T17:58:05,916 INFO [RS:0;3609ad07831c:39733-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 5e6b04d28ec6af2428036942def2f402/A in TestAcidGuarantees,,1734371876313.5e6b04d28ec6af2428036942def2f402. 2024-12-16T17:58:05,916 INFO [RS:0;3609ad07831c:39733-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 5e6b04d28ec6af2428036942def2f402/B in TestAcidGuarantees,,1734371876313.5e6b04d28ec6af2428036942def2f402. 2024-12-16T17:58:05,916 INFO [RS:0;3609ad07831c:39733-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/5e6b04d28ec6af2428036942def2f402/B/cdc5532d91be4200bd2896affeee65bf, hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/5e6b04d28ec6af2428036942def2f402/B/71ffb2b20f344c458b9bc7222ab20c63, hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/5e6b04d28ec6af2428036942def2f402/B/056a3964759f478f85f04a0919436ac4] into tmpdir=hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/5e6b04d28ec6af2428036942def2f402/.tmp, totalSize=35.3 K 2024-12-16T17:58:05,916 INFO [RS:0;3609ad07831c:39733-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/5e6b04d28ec6af2428036942def2f402/A/8a504e4f410d4bdfb5b22a41788b9749, hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/5e6b04d28ec6af2428036942def2f402/A/fc427cf7f423422c88079bf6f96747aa, hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/5e6b04d28ec6af2428036942def2f402/A/57a5450f19e440f7aa07d7722a61620d] into tmpdir=hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/5e6b04d28ec6af2428036942def2f402/.tmp, totalSize=90.8 K 2024-12-16T17:58:05,916 INFO [RS:0;3609ad07831c:39733-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(181): MOB compaction: major=false isAll=true priority=13 throughput controller=DefaultCompactionThroughputController [maxThroughput=50.00 MB/second, activeCompactions=0] table=TestAcidGuarantees cf=A region=TestAcidGuarantees,,1734371876313.5e6b04d28ec6af2428036942def2f402. 2024-12-16T17:58:05,916 DEBUG [RS:0;3609ad07831c:39733-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(191): MOB compaction table=TestAcidGuarantees cf=A region=TestAcidGuarantees,,1734371876313.5e6b04d28ec6af2428036942def2f402. 
files: [hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/5e6b04d28ec6af2428036942def2f402/A/8a504e4f410d4bdfb5b22a41788b9749, hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/5e6b04d28ec6af2428036942def2f402/A/fc427cf7f423422c88079bf6f96747aa, hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/5e6b04d28ec6af2428036942def2f402/A/57a5450f19e440f7aa07d7722a61620d] 2024-12-16T17:58:05,917 DEBUG [RS:0;3609ad07831c:39733-shortCompactions-0 {}] compactions.Compactor(224): Compacting 8a504e4f410d4bdfb5b22a41788b9749, keycount=150, bloomtype=ROW, size=30.3 K, encoding=NONE, compression=NONE, seqNum=56, earliestPutTs=1734371880282 2024-12-16T17:58:05,917 DEBUG [RS:0;3609ad07831c:39733-longCompactions-0 {}] compactions.Compactor(224): Compacting cdc5532d91be4200bd2896affeee65bf, keycount=150, bloomtype=ROW, size=11.8 K, encoding=NONE, compression=NONE, seqNum=56, earliestPutTs=1734371880282 2024-12-16T17:58:05,917 DEBUG [RS:0;3609ad07831c:39733-longCompactions-0 {}] compactions.Compactor(224): Compacting 71ffb2b20f344c458b9bc7222ab20c63, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=78, earliestPutTs=1734371880930 2024-12-16T17:58:05,917 DEBUG [RS:0;3609ad07831c:39733-shortCompactions-0 {}] compactions.Compactor(224): Compacting fc427cf7f423422c88079bf6f96747aa, keycount=150, bloomtype=ROW, size=30.2 K, encoding=NONE, compression=NONE, seqNum=78, earliestPutTs=1734371880930 2024-12-16T17:58:05,917 DEBUG [RS:0;3609ad07831c:39733-longCompactions-0 {}] compactions.Compactor(224): Compacting 056a3964759f478f85f04a0919436ac4, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=96, earliestPutTs=1734371882124 2024-12-16T17:58:05,917 DEBUG [RS:0;3609ad07831c:39733-shortCompactions-0 {}] compactions.Compactor(224): Compacting 57a5450f19e440f7aa07d7722a61620d, keycount=150, bloomtype=ROW, size=30.2 K, encoding=NONE, compression=NONE, seqNum=96, earliestPutTs=1734371882124 2024-12-16T17:58:05,922 INFO [RS:0;3609ad07831c:39733-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 5e6b04d28ec6af2428036942def2f402#B#compaction#327 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-12-16T17:58:05,923 INFO [RS:0;3609ad07831c:39733-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(322): Compact MOB=false optimized configured=false optimized enabled=false maximum MOB file size=1073741824 major=true store=[table=TestAcidGuarantees family=A region=5e6b04d28ec6af2428036942def2f402] 2024-12-16T17:58:05,923 DEBUG [RS:0;3609ad07831c:39733-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/5e6b04d28ec6af2428036942def2f402/.tmp/B/8d47a7a801f5421d8007a817bf4dae42 is 50, key is test_row_0/B:col10/1734371882125/Put/seqid=0 2024-12-16T17:58:05,932 DEBUG [RS:0;3609ad07831c:39733-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(626): New MOB writer created=d41d8cd98f00b204e9800998ecf8427e202412166de3e442b5794b6bb6870ee42dd8310b_5e6b04d28ec6af2428036942def2f402 store=[table=TestAcidGuarantees family=A region=5e6b04d28ec6af2428036942def2f402] 2024-12-16T17:58:05,934 DEBUG [RS:0;3609ad07831c:39733-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(647): Commit or abort size=0 mobCells=0 major=true file=d41d8cd98f00b204e9800998ecf8427e202412166de3e442b5794b6bb6870ee42dd8310b_5e6b04d28ec6af2428036942def2f402, store=[table=TestAcidGuarantees family=A region=5e6b04d28ec6af2428036942def2f402] 2024-12-16T17:58:05,934 DEBUG [RS:0;3609ad07831c:39733-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(658): Aborting writer for hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202412166de3e442b5794b6bb6870ee42dd8310b_5e6b04d28ec6af2428036942def2f402 because there are no MOB cells, store=[table=TestAcidGuarantees family=A region=5e6b04d28ec6af2428036942def2f402] 2024-12-16T17:58:05,941 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41817 is added to blk_1073742207_1383 (size=12207) 2024-12-16T17:58:05,957 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41817 is added to blk_1073742208_1384 (size=4469) 2024-12-16T17:58:05,958 INFO [RS:0;3609ad07831c:39733-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 5e6b04d28ec6af2428036942def2f402#A#compaction#328 average throughput is 0.70 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-12-16T17:58:05,959 DEBUG [RS:0;3609ad07831c:39733-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/5e6b04d28ec6af2428036942def2f402/.tmp/A/f0adba6462a54448bbcac5657e7e2978 is 175, key is test_row_0/A:col10/1734371882125/Put/seqid=0 2024-12-16T17:58:05,963 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41817 is added to blk_1073742209_1385 (size=31161) 2024-12-16T17:58:05,965 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38367 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=100 2024-12-16T17:58:05,969 DEBUG [RS:0;3609ad07831c:39733-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/5e6b04d28ec6af2428036942def2f402/.tmp/A/f0adba6462a54448bbcac5657e7e2978 as hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/5e6b04d28ec6af2428036942def2f402/A/f0adba6462a54448bbcac5657e7e2978 2024-12-16T17:58:05,972 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 3609ad07831c,39733,1734371789085 2024-12-16T17:58:05,972 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=39733 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=101 2024-12-16T17:58:05,972 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-2 {event_type=RS_FLUSH_REGIONS, pid=101}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1734371876313.5e6b04d28ec6af2428036942def2f402. 
2024-12-16T17:58:05,973 INFO [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-2 {event_type=RS_FLUSH_REGIONS, pid=101}] regionserver.HRegion(2837): Flushing 5e6b04d28ec6af2428036942def2f402 3/3 column families, dataSize=120.76 KB heapSize=317.16 KB 2024-12-16T17:58:05,973 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-2 {event_type=RS_FLUSH_REGIONS, pid=101}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 5e6b04d28ec6af2428036942def2f402, store=A 2024-12-16T17:58:05,973 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-2 {event_type=RS_FLUSH_REGIONS, pid=101}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-16T17:58:05,973 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-2 {event_type=RS_FLUSH_REGIONS, pid=101}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 5e6b04d28ec6af2428036942def2f402, store=B 2024-12-16T17:58:05,973 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-2 {event_type=RS_FLUSH_REGIONS, pid=101}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-16T17:58:05,973 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-2 {event_type=RS_FLUSH_REGIONS, pid=101}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 5e6b04d28ec6af2428036942def2f402, store=C 2024-12-16T17:58:05,973 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-2 {event_type=RS_FLUSH_REGIONS, pid=101}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-16T17:58:05,973 INFO [RS:0;3609ad07831c:39733-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 5e6b04d28ec6af2428036942def2f402/A of 5e6b04d28ec6af2428036942def2f402 into f0adba6462a54448bbcac5657e7e2978(size=30.4 K), total size for store is 30.4 K. This selection was in queue for 0sec, and took 0sec to execute. 
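[Editor's note] The flush driven by pid=100/101 and the minor compactions of stores A/B/C above were queued automatically (by the master's FlushTableProcedure and by MemStoreFlusher compaction requests), but the same operations can be requested explicitly through the Admin API, which is roughly what the earlier client-side "Operation: FLUSH, Table Name: default:TestAcidGuarantees" entry corresponds to. A minimal sketch, assuming a reachable cluster:

import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.util.Bytes;

public class FlushAndCompactSketch {
  public static void main(String[] args) throws Exception {
    try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create());
         Admin admin = conn.getAdmin()) {
      TableName table = TableName.valueOf("TestAcidGuarantees");
      // Request a table flush; on this master it shows up as a FlushTableProcedure (cf. pid=100 above).
      admin.flush(table);
      // Request a compaction of column family A; it runs asynchronously on the region server.
      admin.compact(table, Bytes.toBytes("A"));
    }
  }
}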
2024-12-16T17:58:05,973 DEBUG [RS:0;3609ad07831c:39733-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 5e6b04d28ec6af2428036942def2f402: 2024-12-16T17:58:05,973 INFO [RS:0;3609ad07831c:39733-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1734371876313.5e6b04d28ec6af2428036942def2f402., storeName=5e6b04d28ec6af2428036942def2f402/A, priority=13, startTime=1734371885915; duration=0sec 2024-12-16T17:58:05,974 DEBUG [RS:0;3609ad07831c:39733-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-16T17:58:05,974 DEBUG [RS:0;3609ad07831c:39733-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 5e6b04d28ec6af2428036942def2f402:A 2024-12-16T17:58:05,974 DEBUG [RS:0;3609ad07831c:39733-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-16T17:58:05,974 DEBUG [RS:0;3609ad07831c:39733-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 36106 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-16T17:58:05,974 DEBUG [RS:0;3609ad07831c:39733-shortCompactions-0 {}] regionserver.HStore(1540): 5e6b04d28ec6af2428036942def2f402/C is initiating minor compaction (all files) 2024-12-16T17:58:05,974 INFO [RS:0;3609ad07831c:39733-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 5e6b04d28ec6af2428036942def2f402/C in TestAcidGuarantees,,1734371876313.5e6b04d28ec6af2428036942def2f402. 2024-12-16T17:58:05,975 INFO [RS:0;3609ad07831c:39733-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/5e6b04d28ec6af2428036942def2f402/C/ac3ef196633e405c95ae4d1518ac1940, hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/5e6b04d28ec6af2428036942def2f402/C/095f0fffe8c643669295993ff2d2940d, hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/5e6b04d28ec6af2428036942def2f402/C/486e5acf997541219ae6c4984ca3607b] into tmpdir=hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/5e6b04d28ec6af2428036942def2f402/.tmp, totalSize=35.3 K 2024-12-16T17:58:05,975 DEBUG [RS:0;3609ad07831c:39733-shortCompactions-0 {}] compactions.Compactor(224): Compacting ac3ef196633e405c95ae4d1518ac1940, keycount=150, bloomtype=ROW, size=11.8 K, encoding=NONE, compression=NONE, seqNum=56, earliestPutTs=1734371880282 2024-12-16T17:58:05,975 DEBUG [RS:0;3609ad07831c:39733-shortCompactions-0 {}] compactions.Compactor(224): Compacting 095f0fffe8c643669295993ff2d2940d, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=78, earliestPutTs=1734371880930 2024-12-16T17:58:05,975 DEBUG [RS:0;3609ad07831c:39733-shortCompactions-0 {}] compactions.Compactor(224): Compacting 486e5acf997541219ae6c4984ca3607b, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=96, earliestPutTs=1734371882124 2024-12-16T17:58:05,978 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-2 {event_type=RS_FLUSH_REGIONS, pid=101}] 
hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202412168cb75d6985444a4e849602bab1985ff0_5e6b04d28ec6af2428036942def2f402 is 50, key is test_row_0/A:col10/1734371884271/Put/seqid=0 2024-12-16T17:58:05,983 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41817 is added to blk_1073742210_1386 (size=12154) 2024-12-16T17:58:05,984 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-2 {event_type=RS_FLUSH_REGIONS, pid=101}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-16T17:58:05,984 INFO [RS:0;3609ad07831c:39733-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 5e6b04d28ec6af2428036942def2f402#C#compaction#330 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-12-16T17:58:05,985 DEBUG [RS:0;3609ad07831c:39733-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/5e6b04d28ec6af2428036942def2f402/.tmp/C/b532c1b87a8e44d98263e607d6e41058 is 50, key is test_row_0/C:col10/1734371882125/Put/seqid=0 2024-12-16T17:58:05,990 INFO [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-2 {event_type=RS_FLUSH_REGIONS, pid=101}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202412168cb75d6985444a4e849602bab1985ff0_5e6b04d28ec6af2428036942def2f402 to hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202412168cb75d6985444a4e849602bab1985ff0_5e6b04d28ec6af2428036942def2f402 2024-12-16T17:58:05,990 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-2 {event_type=RS_FLUSH_REGIONS, pid=101}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/5e6b04d28ec6af2428036942def2f402/.tmp/A/fac681912eeb4b139f3a8776ee5e61dd, store: [table=TestAcidGuarantees family=A region=5e6b04d28ec6af2428036942def2f402] 2024-12-16T17:58:05,991 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-2 {event_type=RS_FLUSH_REGIONS, pid=101}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/5e6b04d28ec6af2428036942def2f402/.tmp/A/fac681912eeb4b139f3a8776ee5e61dd is 175, key is test_row_0/A:col10/1734371884271/Put/seqid=0 2024-12-16T17:58:05,997 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41817 is added to blk_1073742211_1387 (size=12207) 2024-12-16T17:58:06,006 DEBUG [RS:0;3609ad07831c:39733-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/5e6b04d28ec6af2428036942def2f402/.tmp/C/b532c1b87a8e44d98263e607d6e41058 as 
hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/5e6b04d28ec6af2428036942def2f402/C/b532c1b87a8e44d98263e607d6e41058 2024-12-16T17:58:06,006 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41817 is added to blk_1073742212_1388 (size=30955) 2024-12-16T17:58:06,007 INFO [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-2 {event_type=RS_FLUSH_REGIONS, pid=101}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=117, memsize=40.3 K, hasBloomFilter=true, into tmp file hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/5e6b04d28ec6af2428036942def2f402/.tmp/A/fac681912eeb4b139f3a8776ee5e61dd 2024-12-16T17:58:06,011 INFO [RS:0;3609ad07831c:39733-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 5e6b04d28ec6af2428036942def2f402/C of 5e6b04d28ec6af2428036942def2f402 into b532c1b87a8e44d98263e607d6e41058(size=11.9 K), total size for store is 11.9 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-12-16T17:58:06,011 DEBUG [RS:0;3609ad07831c:39733-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 5e6b04d28ec6af2428036942def2f402: 2024-12-16T17:58:06,011 INFO [RS:0;3609ad07831c:39733-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1734371876313.5e6b04d28ec6af2428036942def2f402., storeName=5e6b04d28ec6af2428036942def2f402/C, priority=13, startTime=1734371885915; duration=0sec 2024-12-16T17:58:06,011 DEBUG [RS:0;3609ad07831c:39733-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-16T17:58:06,011 DEBUG [RS:0;3609ad07831c:39733-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 5e6b04d28ec6af2428036942def2f402:C 2024-12-16T17:58:06,013 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-2 {event_type=RS_FLUSH_REGIONS, pid=101}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/5e6b04d28ec6af2428036942def2f402/.tmp/B/03f75cd228fd4363950b2e374c40e59b is 50, key is test_row_0/B:col10/1734371884271/Put/seqid=0 2024-12-16T17:58:06,016 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41817 is added to blk_1073742213_1389 (size=12001) 2024-12-16T17:58:06,017 INFO [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-2 {event_type=RS_FLUSH_REGIONS, pid=101}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=40.25 KB at sequenceid=117 (bloomFilter=true), to=hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/5e6b04d28ec6af2428036942def2f402/.tmp/B/03f75cd228fd4363950b2e374c40e59b 2024-12-16T17:58:06,024 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-2 {event_type=RS_FLUSH_REGIONS, pid=101}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/5e6b04d28ec6af2428036942def2f402/.tmp/C/a451c1cba1bc42b8ac4cac6c89fc3d91 is 50, key is test_row_0/C:col10/1734371884271/Put/seqid=0 2024-12-16T17:58:06,027 INFO [Block report processor {}] 
blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41817 is added to blk_1073742214_1390 (size=12001) 2024-12-16T17:58:06,347 DEBUG [RS:0;3609ad07831c:39733-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/5e6b04d28ec6af2428036942def2f402/.tmp/B/8d47a7a801f5421d8007a817bf4dae42 as hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/5e6b04d28ec6af2428036942def2f402/B/8d47a7a801f5421d8007a817bf4dae42 2024-12-16T17:58:06,351 INFO [RS:0;3609ad07831c:39733-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 5e6b04d28ec6af2428036942def2f402/B of 5e6b04d28ec6af2428036942def2f402 into 8d47a7a801f5421d8007a817bf4dae42(size=11.9 K), total size for store is 11.9 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-12-16T17:58:06,351 DEBUG [RS:0;3609ad07831c:39733-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 5e6b04d28ec6af2428036942def2f402: 2024-12-16T17:58:06,351 INFO [RS:0;3609ad07831c:39733-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1734371876313.5e6b04d28ec6af2428036942def2f402., storeName=5e6b04d28ec6af2428036942def2f402/B, priority=13, startTime=1734371885915; duration=0sec 2024-12-16T17:58:06,351 DEBUG [RS:0;3609ad07831c:39733-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-16T17:58:06,351 DEBUG [RS:0;3609ad07831c:39733-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 5e6b04d28ec6af2428036942def2f402:B 2024-12-16T17:58:06,405 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1734371876313.5e6b04d28ec6af2428036942def2f402. as already flushing 2024-12-16T17:58:06,405 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39733 {}] regionserver.HRegion(8581): Flush requested on 5e6b04d28ec6af2428036942def2f402 2024-12-16T17:58:06,420 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39733 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5e6b04d28ec6af2428036942def2f402, server=3609ad07831c,39733,1734371789085 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-16T17:58:06,421 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39733 {}] ipc.CallRunner(138): callId: 61 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:47436 deadline: 1734371946415, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5e6b04d28ec6af2428036942def2f402, server=3609ad07831c,39733,1734371789085 2024-12-16T17:58:06,421 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39733 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5e6b04d28ec6af2428036942def2f402, server=3609ad07831c,39733,1734371789085 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-16T17:58:06,421 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39733 {}] ipc.CallRunner(138): callId: 62 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:47372 deadline: 1734371946415, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5e6b04d28ec6af2428036942def2f402, server=3609ad07831c,39733,1734371789085 2024-12-16T17:58:06,424 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39733 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5e6b04d28ec6af2428036942def2f402, server=3609ad07831c,39733,1734371789085 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-16T17:58:06,424 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39733 {}] ipc.CallRunner(138): callId: 67 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:47374 deadline: 1734371946419, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5e6b04d28ec6af2428036942def2f402, server=3609ad07831c,39733,1734371789085 2024-12-16T17:58:06,424 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39733 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5e6b04d28ec6af2428036942def2f402, server=3609ad07831c,39733,1734371789085 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-16T17:58:06,424 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39733 {}] ipc.CallRunner(138): callId: 60 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:47422 deadline: 1734371946420, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5e6b04d28ec6af2428036942def2f402, server=3609ad07831c,39733,1734371789085 2024-12-16T17:58:06,425 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39733 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
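The repeated rejections above and below come from HRegion.checkResources(), which blocks writes once a region's memstore exceeds its blocking threshold, i.e. hbase.hregion.memstore.flush.size multiplied by hbase.hregion.memstore.block.multiplier; the 512.0 K limit in these entries presumably reflects the test's deliberately small flush size rather than production defaults. A minimal sketch of how the two settings combine, using illustrative default-like values (not the test's actual configuration):

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;

public class MemstoreBlockingConfigSketch {
  public static void main(String[] args) {
    // Illustrative values only; the TestAcidGuarantees run clearly uses much smaller ones.
    Configuration conf = HBaseConfiguration.create();
    // Per-region memstore size that triggers a flush (default is 128 MB).
    conf.setLong("hbase.hregion.memstore.flush.size", 128L * 1024 * 1024);
    // Writes are rejected with RegionTooBusyException once the memstore grows past
    // flush.size * block.multiplier (default multiplier is 4).
    conf.setInt("hbase.hregion.memstore.block.multiplier", 4);

    long blockingLimit = conf.getLong("hbase.hregion.memstore.flush.size", 0L)
        * conf.getInt("hbase.hregion.memstore.block.multiplier", 4);
    System.out.println("Blocking memstore limit: " + blockingLimit + " bytes");
  }
}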
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5e6b04d28ec6af2428036942def2f402, server=3609ad07831c,39733,1734371789085 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-16T17:58:06,425 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39733 {}] ipc.CallRunner(138): callId: 74 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:47356 deadline: 1734371946420, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5e6b04d28ec6af2428036942def2f402, server=3609ad07831c,39733,1734371789085 2024-12-16T17:58:06,428 INFO [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-2 {event_type=RS_FLUSH_REGIONS, pid=101}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=40.25 KB at sequenceid=117 (bloomFilter=true), to=hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/5e6b04d28ec6af2428036942def2f402/.tmp/C/a451c1cba1bc42b8ac4cac6c89fc3d91 2024-12-16T17:58:06,432 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-2 {event_type=RS_FLUSH_REGIONS, pid=101}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/5e6b04d28ec6af2428036942def2f402/.tmp/A/fac681912eeb4b139f3a8776ee5e61dd as hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/5e6b04d28ec6af2428036942def2f402/A/fac681912eeb4b139f3a8776ee5e61dd 2024-12-16T17:58:06,435 INFO [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-2 {event_type=RS_FLUSH_REGIONS, pid=101}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/5e6b04d28ec6af2428036942def2f402/A/fac681912eeb4b139f3a8776ee5e61dd, entries=150, sequenceid=117, filesize=30.2 K 2024-12-16T17:58:06,436 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-2 {event_type=RS_FLUSH_REGIONS, pid=101}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/5e6b04d28ec6af2428036942def2f402/.tmp/B/03f75cd228fd4363950b2e374c40e59b as 
hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/5e6b04d28ec6af2428036942def2f402/B/03f75cd228fd4363950b2e374c40e59b 2024-12-16T17:58:06,439 INFO [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-2 {event_type=RS_FLUSH_REGIONS, pid=101}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/5e6b04d28ec6af2428036942def2f402/B/03f75cd228fd4363950b2e374c40e59b, entries=150, sequenceid=117, filesize=11.7 K 2024-12-16T17:58:06,439 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-2 {event_type=RS_FLUSH_REGIONS, pid=101}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/5e6b04d28ec6af2428036942def2f402/.tmp/C/a451c1cba1bc42b8ac4cac6c89fc3d91 as hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/5e6b04d28ec6af2428036942def2f402/C/a451c1cba1bc42b8ac4cac6c89fc3d91 2024-12-16T17:58:06,442 INFO [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-2 {event_type=RS_FLUSH_REGIONS, pid=101}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/5e6b04d28ec6af2428036942def2f402/C/a451c1cba1bc42b8ac4cac6c89fc3d91, entries=150, sequenceid=117, filesize=11.7 K 2024-12-16T17:58:06,443 INFO [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-2 {event_type=RS_FLUSH_REGIONS, pid=101}] regionserver.HRegion(3040): Finished flush of dataSize ~120.76 KB/123660, heapSize ~317.11 KB/324720, currentSize=80.51 KB/82440 for 5e6b04d28ec6af2428036942def2f402 in 471ms, sequenceid=117, compaction requested=false 2024-12-16T17:58:06,443 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-2 {event_type=RS_FLUSH_REGIONS, pid=101}] regionserver.HRegion(2538): Flush status journal for 5e6b04d28ec6af2428036942def2f402: 2024-12-16T17:58:06,443 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-2 {event_type=RS_FLUSH_REGIONS, pid=101}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1734371876313.5e6b04d28ec6af2428036942def2f402. 
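The flush just completed here (pid=101) and the RegionTooBusyException retries recorded around it are driven from the client side. Below is a minimal sketch, assuming the stock HBase 2.x client API, of how a test client might request the table flush (the FlushTableProcedure/FlushRegionProcedure pair, pid=100/101) and back off when a put is rejected while the memstore is over its blocking limit; the class name, cell value and retry policy are illustrative, and only the table, row, family and qualifier names are taken from the log.

import java.io.IOException;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.util.Bytes;

public class FlushAndRetrySketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    TableName tableName = TableName.valueOf("TestAcidGuarantees");
    try (Connection conn = ConnectionFactory.createConnection(conf);
         Admin admin = conn.getAdmin();
         Table table = conn.getTable(tableName)) {
      // Admin.flush submits a FlushTableProcedure on the master and waits for it,
      // matching the "Operation: FLUSH ... procId: 100 completed" entry below.
      admin.flush(tableName);

      // A writer racing with flushes may be rejected while the region's memstore is
      // over its blocking limit; the server raises RegionTooBusyException and the
      // client typically surfaces it (possibly wrapped) after its own retries.
      Put put = new Put(Bytes.toBytes("test_row_0"));
      put.addColumn(Bytes.toBytes("A"), Bytes.toBytes("col10"), Bytes.toBytes("v"));
      for (int attempt = 1; attempt <= 5; attempt++) {
        try {
          table.put(put);
          break;
        } catch (IOException busy) {
          // Illustrative backoff only; real tests rely on the client's retry settings.
          Thread.sleep(100L * attempt);
        }
      }
    }
  }
}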
2024-12-16T17:58:06,443 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-2 {event_type=RS_FLUSH_REGIONS, pid=101}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=101 2024-12-16T17:58:06,444 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38367 {}] master.HMaster(4106): Remote procedure done, pid=101 2024-12-16T17:58:06,445 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=101, resume processing ppid=100 2024-12-16T17:58:06,445 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=101, ppid=100, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 1.0800 sec 2024-12-16T17:58:06,446 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=100, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=100, table=TestAcidGuarantees in 1.0830 sec 2024-12-16T17:58:06,466 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38367 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=100 2024-12-16T17:58:06,466 INFO [Thread-1619 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 100 completed 2024-12-16T17:58:06,467 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38367 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-12-16T17:58:06,468 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38367 {}] procedure2.ProcedureExecutor(1098): Stored pid=102, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=102, table=TestAcidGuarantees 2024-12-16T17:58:06,468 INFO [PEWorker-3 {}] procedure.FlushTableProcedure(91): pid=102, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=102, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-12-16T17:58:06,468 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38367 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=102 2024-12-16T17:58:06,469 INFO [PEWorker-3 {}] procedure.FlushTableProcedure(91): pid=102, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=102, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-12-16T17:58:06,469 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=103, ppid=102, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-12-16T17:58:06,523 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39733 {}] regionserver.HRegion(8581): Flush requested on 5e6b04d28ec6af2428036942def2f402 2024-12-16T17:58:06,524 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 5e6b04d28ec6af2428036942def2f402 3/3 column families, dataSize=87.22 KB heapSize=229.27 KB 2024-12-16T17:58:06,524 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 5e6b04d28ec6af2428036942def2f402, store=A 2024-12-16T17:58:06,524 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-16T17:58:06,524 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 5e6b04d28ec6af2428036942def2f402, store=B 2024-12-16T17:58:06,524 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new 
segment=null 2024-12-16T17:58:06,524 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 5e6b04d28ec6af2428036942def2f402, store=C 2024-12-16T17:58:06,524 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-16T17:58:06,529 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e2024121699add50dc6ef49fa9a7af6291b6f467b_5e6b04d28ec6af2428036942def2f402 is 50, key is test_row_0/A:col10/1734371886522/Put/seqid=0 2024-12-16T17:58:06,533 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41817 is added to blk_1073742215_1391 (size=14794) 2024-12-16T17:58:06,569 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38367 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=102 2024-12-16T17:58:06,585 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39733 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5e6b04d28ec6af2428036942def2f402, server=3609ad07831c,39733,1734371789085 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-16T17:58:06,585 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39733 {}] ipc.CallRunner(138): callId: 80 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:47356 deadline: 1734371946578, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5e6b04d28ec6af2428036942def2f402, server=3609ad07831c,39733,1734371789085 2024-12-16T17:58:06,589 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39733 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5e6b04d28ec6af2428036942def2f402, server=3609ad07831c,39733,1734371789085 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-16T17:58:06,590 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39733 {}] ipc.CallRunner(138): callId: 74 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:47374 deadline: 1734371946585, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5e6b04d28ec6af2428036942def2f402, server=3609ad07831c,39733,1734371789085 2024-12-16T17:58:06,590 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39733 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5e6b04d28ec6af2428036942def2f402, server=3609ad07831c,39733,1734371789085 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-16T17:58:06,590 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39733 {}] ipc.CallRunner(138): callId: 66 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:47422 deadline: 1734371946585, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5e6b04d28ec6af2428036942def2f402, server=3609ad07831c,39733,1734371789085 2024-12-16T17:58:06,592 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39733 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5e6b04d28ec6af2428036942def2f402, server=3609ad07831c,39733,1734371789085 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-16T17:58:06,592 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39733 {}] ipc.CallRunner(138): callId: 70 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:47372 deadline: 1734371946586, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5e6b04d28ec6af2428036942def2f402, server=3609ad07831c,39733,1734371789085 2024-12-16T17:58:06,624 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 3609ad07831c,39733,1734371789085 2024-12-16T17:58:06,625 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=39733 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=103 2024-12-16T17:58:06,625 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-0 {event_type=RS_FLUSH_REGIONS, pid=103}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1734371876313.5e6b04d28ec6af2428036942def2f402. 2024-12-16T17:58:06,625 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-0 {event_type=RS_FLUSH_REGIONS, pid=103}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1734371876313.5e6b04d28ec6af2428036942def2f402. as already flushing 2024-12-16T17:58:06,625 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-0 {event_type=RS_FLUSH_REGIONS, pid=103}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1734371876313.5e6b04d28ec6af2428036942def2f402. 2024-12-16T17:58:06,625 ERROR [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-0 {event_type=RS_FLUSH_REGIONS, pid=103}] handler.RSProcedureHandler(58): pid=103 java.io.IOException: Unable to complete flush {ENCODED => 5e6b04d28ec6af2428036942def2f402, NAME => 'TestAcidGuarantees,,1734371876313.5e6b04d28ec6af2428036942def2f402.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-16T17:58:06,625 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-0 {event_type=RS_FLUSH_REGIONS, pid=103}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=103 java.io.IOException: Unable to complete flush {ENCODED => 5e6b04d28ec6af2428036942def2f402, NAME => 'TestAcidGuarantees,,1734371876313.5e6b04d28ec6af2428036942def2f402.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-16T17:58:06,625 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38367 {}] master.HMaster(4114): Remote procedure failed, pid=103 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 5e6b04d28ec6af2428036942def2f402, NAME => 'TestAcidGuarantees,,1734371876313.5e6b04d28ec6af2428036942def2f402.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 5e6b04d28ec6af2428036942def2f402, NAME => 'TestAcidGuarantees,,1734371876313.5e6b04d28ec6af2428036942def2f402.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-16T17:58:06,689 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39733 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5e6b04d28ec6af2428036942def2f402, server=3609ad07831c,39733,1734371789085 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-16T17:58:06,689 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39733 {}] ipc.CallRunner(138): callId: 82 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:47356 deadline: 1734371946686, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5e6b04d28ec6af2428036942def2f402, server=3609ad07831c,39733,1734371789085 2024-12-16T17:58:06,693 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39733 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5e6b04d28ec6af2428036942def2f402, server=3609ad07831c,39733,1734371789085 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-16T17:58:06,693 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39733 {}] ipc.CallRunner(138): callId: 76 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:47374 deadline: 1734371946690, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5e6b04d28ec6af2428036942def2f402, server=3609ad07831c,39733,1734371789085 2024-12-16T17:58:06,694 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39733 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5e6b04d28ec6af2428036942def2f402, server=3609ad07831c,39733,1734371789085 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-16T17:58:06,694 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39733 {}] ipc.CallRunner(138): callId: 68 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:47422 deadline: 1734371946691, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5e6b04d28ec6af2428036942def2f402, server=3609ad07831c,39733,1734371789085 2024-12-16T17:58:06,698 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39733 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5e6b04d28ec6af2428036942def2f402, server=3609ad07831c,39733,1734371789085 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-16T17:58:06,699 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39733 {}] ipc.CallRunner(138): callId: 72 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:47372 deadline: 1734371946695, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5e6b04d28ec6af2428036942def2f402, server=3609ad07831c,39733,1734371789085 2024-12-16T17:58:06,770 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38367 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=102 2024-12-16T17:58:06,776 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 3609ad07831c,39733,1734371789085 2024-12-16T17:58:06,777 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=39733 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=103 2024-12-16T17:58:06,777 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-1 {event_type=RS_FLUSH_REGIONS, pid=103}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1734371876313.5e6b04d28ec6af2428036942def2f402. 2024-12-16T17:58:06,777 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-1 {event_type=RS_FLUSH_REGIONS, pid=103}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1734371876313.5e6b04d28ec6af2428036942def2f402. as already flushing 2024-12-16T17:58:06,777 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-1 {event_type=RS_FLUSH_REGIONS, pid=103}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1734371876313.5e6b04d28ec6af2428036942def2f402. 2024-12-16T17:58:06,777 ERROR [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-1 {event_type=RS_FLUSH_REGIONS, pid=103}] handler.RSProcedureHandler(58): pid=103 java.io.IOException: Unable to complete flush {ENCODED => 5e6b04d28ec6af2428036942def2f402, NAME => 'TestAcidGuarantees,,1734371876313.5e6b04d28ec6af2428036942def2f402.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-16T17:58:06,777 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-1 {event_type=RS_FLUSH_REGIONS, pid=103}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=103 java.io.IOException: Unable to complete flush {ENCODED => 5e6b04d28ec6af2428036942def2f402, NAME => 'TestAcidGuarantees,,1734371876313.5e6b04d28ec6af2428036942def2f402.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-16T17:58:06,778 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38367 {}] master.HMaster(4114): Remote procedure failed, pid=103 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 5e6b04d28ec6af2428036942def2f402, NAME => 'TestAcidGuarantees,,1734371876313.5e6b04d28ec6af2428036942def2f402.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 5e6b04d28ec6af2428036942def2f402, NAME => 'TestAcidGuarantees,,1734371876313.5e6b04d28ec6af2428036942def2f402.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-16T17:58:06,893 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39733 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5e6b04d28ec6af2428036942def2f402, server=3609ad07831c,39733,1734371789085 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-16T17:58:06,893 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39733 {}] ipc.CallRunner(138): callId: 84 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:47356 deadline: 1734371946891, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5e6b04d28ec6af2428036942def2f402, server=3609ad07831c,39733,1734371789085 2024-12-16T17:58:06,897 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39733 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5e6b04d28ec6af2428036942def2f402, server=3609ad07831c,39733,1734371789085 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-16T17:58:06,898 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39733 {}] ipc.CallRunner(138): callId: 78 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:47374 deadline: 1734371946895, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5e6b04d28ec6af2428036942def2f402, server=3609ad07831c,39733,1734371789085 2024-12-16T17:58:06,898 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39733 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5e6b04d28ec6af2428036942def2f402, server=3609ad07831c,39733,1734371789085 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-16T17:58:06,898 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39733 {}] ipc.CallRunner(138): callId: 70 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:47422 deadline: 1734371946896, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5e6b04d28ec6af2428036942def2f402, server=3609ad07831c,39733,1734371789085 2024-12-16T17:58:06,902 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39733 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5e6b04d28ec6af2428036942def2f402, server=3609ad07831c,39733,1734371789085 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-16T17:58:06,902 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39733 {}] ipc.CallRunner(138): callId: 74 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:47372 deadline: 1734371946901, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5e6b04d28ec6af2428036942def2f402, server=3609ad07831c,39733,1734371789085 2024-12-16T17:58:06,929 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 3609ad07831c,39733,1734371789085 2024-12-16T17:58:06,929 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=39733 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=103 2024-12-16T17:58:06,929 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-2 {event_type=RS_FLUSH_REGIONS, pid=103}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1734371876313.5e6b04d28ec6af2428036942def2f402. 2024-12-16T17:58:06,930 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-2 {event_type=RS_FLUSH_REGIONS, pid=103}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1734371876313.5e6b04d28ec6af2428036942def2f402. as already flushing 2024-12-16T17:58:06,930 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-2 {event_type=RS_FLUSH_REGIONS, pid=103}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1734371876313.5e6b04d28ec6af2428036942def2f402. 2024-12-16T17:58:06,930 ERROR [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-2 {event_type=RS_FLUSH_REGIONS, pid=103}] handler.RSProcedureHandler(58): pid=103 java.io.IOException: Unable to complete flush {ENCODED => 5e6b04d28ec6af2428036942def2f402, NAME => 'TestAcidGuarantees,,1734371876313.5e6b04d28ec6af2428036942def2f402.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] 
at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-16T17:58:06,930 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-2 {event_type=RS_FLUSH_REGIONS, pid=103}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=103 java.io.IOException: Unable to complete flush {ENCODED => 5e6b04d28ec6af2428036942def2f402, NAME => 'TestAcidGuarantees,,1734371876313.5e6b04d28ec6af2428036942def2f402.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-16T17:58:06,930 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38367 {}] master.HMaster(4114): Remote procedure failed, pid=103 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 5e6b04d28ec6af2428036942def2f402, NAME => 'TestAcidGuarantees,,1734371876313.5e6b04d28ec6af2428036942def2f402.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 5e6b04d28ec6af2428036942def2f402, NAME => 'TestAcidGuarantees,,1734371876313.5e6b04d28ec6af2428036942def2f402.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-16T17:58:06,933 DEBUG [MemStoreFlusher.0 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-16T17:58:06,936 INFO [MemStoreFlusher.0 {}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e2024121699add50dc6ef49fa9a7af6291b6f467b_5e6b04d28ec6af2428036942def2f402 to hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e2024121699add50dc6ef49fa9a7af6291b6f467b_5e6b04d28ec6af2428036942def2f402 2024-12-16T17:58:06,937 DEBUG [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/5e6b04d28ec6af2428036942def2f402/.tmp/A/312b6669a37549bf8d85ee4c7c8b57da, store: [table=TestAcidGuarantees family=A region=5e6b04d28ec6af2428036942def2f402] 2024-12-16T17:58:06,938 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/5e6b04d28ec6af2428036942def2f402/.tmp/A/312b6669a37549bf8d85ee4c7c8b57da is 175, key is test_row_0/A:col10/1734371886522/Put/seqid=0 2024-12-16T17:58:06,941 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41817 is added to blk_1073742216_1392 (size=39749) 2024-12-16T17:58:07,071 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38367 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=102 2024-12-16T17:58:07,082 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 3609ad07831c,39733,1734371789085 2024-12-16T17:58:07,082 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=39733 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=103 2024-12-16T17:58:07,082 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-0 {event_type=RS_FLUSH_REGIONS, pid=103}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1734371876313.5e6b04d28ec6af2428036942def2f402. 2024-12-16T17:58:07,082 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-0 {event_type=RS_FLUSH_REGIONS, pid=103}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1734371876313.5e6b04d28ec6af2428036942def2f402. 
as already flushing 2024-12-16T17:58:07,082 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-0 {event_type=RS_FLUSH_REGIONS, pid=103}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1734371876313.5e6b04d28ec6af2428036942def2f402. 2024-12-16T17:58:07,082 ERROR [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-0 {event_type=RS_FLUSH_REGIONS, pid=103}] handler.RSProcedureHandler(58): pid=103 java.io.IOException: Unable to complete flush {ENCODED => 5e6b04d28ec6af2428036942def2f402, NAME => 'TestAcidGuarantees,,1734371876313.5e6b04d28ec6af2428036942def2f402.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-16T17:58:07,082 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-0 {event_type=RS_FLUSH_REGIONS, pid=103}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=103 java.io.IOException: Unable to complete flush {ENCODED => 5e6b04d28ec6af2428036942def2f402, NAME => 'TestAcidGuarantees,,1734371876313.5e6b04d28ec6af2428036942def2f402.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-16T17:58:07,083 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38367 {}] master.HMaster(4114): Remote procedure failed, pid=103 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 5e6b04d28ec6af2428036942def2f402, NAME => 'TestAcidGuarantees,,1734371876313.5e6b04d28ec6af2428036942def2f402.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] 
at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 5e6b04d28ec6af2428036942def2f402, NAME => 'TestAcidGuarantees,,1734371876313.5e6b04d28ec6af2428036942def2f402.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-16T17:58:07,196 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39733 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5e6b04d28ec6af2428036942def2f402, server=3609ad07831c,39733,1734371789085 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-16T17:58:07,197 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39733 {}] ipc.CallRunner(138): callId: 86 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:47356 deadline: 1734371947194, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5e6b04d28ec6af2428036942def2f402, server=3609ad07831c,39733,1734371789085 2024-12-16T17:58:07,202 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39733 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5e6b04d28ec6af2428036942def2f402, server=3609ad07831c,39733,1734371789085 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-16T17:58:07,202 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39733 {}] ipc.CallRunner(138): callId: 72 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:47422 deadline: 1734371947199, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5e6b04d28ec6af2428036942def2f402, server=3609ad07831c,39733,1734371789085 2024-12-16T17:58:07,203 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39733 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5e6b04d28ec6af2428036942def2f402, server=3609ad07831c,39733,1734371789085 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-16T17:58:07,203 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39733 {}] ipc.CallRunner(138): callId: 80 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:47374 deadline: 1734371947200, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5e6b04d28ec6af2428036942def2f402, server=3609ad07831c,39733,1734371789085 2024-12-16T17:58:07,207 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39733 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5e6b04d28ec6af2428036942def2f402, server=3609ad07831c,39733,1734371789085 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-16T17:58:07,207 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39733 {}] ipc.CallRunner(138): callId: 76 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:47372 deadline: 1734371947204, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5e6b04d28ec6af2428036942def2f402, server=3609ad07831c,39733,1734371789085 2024-12-16T17:58:07,234 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 3609ad07831c,39733,1734371789085 2024-12-16T17:58:07,235 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=39733 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=103 2024-12-16T17:58:07,235 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-1 {event_type=RS_FLUSH_REGIONS, pid=103}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1734371876313.5e6b04d28ec6af2428036942def2f402. 2024-12-16T17:58:07,235 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-1 {event_type=RS_FLUSH_REGIONS, pid=103}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1734371876313.5e6b04d28ec6af2428036942def2f402. as already flushing 2024-12-16T17:58:07,235 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-1 {event_type=RS_FLUSH_REGIONS, pid=103}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1734371876313.5e6b04d28ec6af2428036942def2f402. 2024-12-16T17:58:07,235 ERROR [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-1 {event_type=RS_FLUSH_REGIONS, pid=103}] handler.RSProcedureHandler(58): pid=103 java.io.IOException: Unable to complete flush {ENCODED => 5e6b04d28ec6af2428036942def2f402, NAME => 'TestAcidGuarantees,,1734371876313.5e6b04d28ec6af2428036942def2f402.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-16T17:58:07,235 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-1 {event_type=RS_FLUSH_REGIONS, pid=103}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=103 java.io.IOException: Unable to complete flush {ENCODED => 5e6b04d28ec6af2428036942def2f402, NAME => 'TestAcidGuarantees,,1734371876313.5e6b04d28ec6af2428036942def2f402.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-16T17:58:07,236 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38367 {}] master.HMaster(4114): Remote procedure failed, pid=103 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 5e6b04d28ec6af2428036942def2f402, NAME => 'TestAcidGuarantees,,1734371876313.5e6b04d28ec6af2428036942def2f402.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 5e6b04d28ec6af2428036942def2f402, NAME => 'TestAcidGuarantees,,1734371876313.5e6b04d28ec6af2428036942def2f402.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-16T17:58:07,341 INFO [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=136, memsize=29.1 K, hasBloomFilter=true, into tmp file hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/5e6b04d28ec6af2428036942def2f402/.tmp/A/312b6669a37549bf8d85ee4c7c8b57da 2024-12-16T17:58:07,347 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/5e6b04d28ec6af2428036942def2f402/.tmp/B/0898b6b0f6fe4b4387a9c739002b19c1 is 50, key is test_row_0/B:col10/1734371886522/Put/seqid=0 2024-12-16T17:58:07,350 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41817 is added to blk_1073742217_1393 (size=12151) 2024-12-16T17:58:07,387 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 3609ad07831c,39733,1734371789085 2024-12-16T17:58:07,387 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=39733 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=103 2024-12-16T17:58:07,387 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-2 {event_type=RS_FLUSH_REGIONS, pid=103}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1734371876313.5e6b04d28ec6af2428036942def2f402. 2024-12-16T17:58:07,387 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-2 {event_type=RS_FLUSH_REGIONS, pid=103}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1734371876313.5e6b04d28ec6af2428036942def2f402. as already flushing 2024-12-16T17:58:07,388 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-2 {event_type=RS_FLUSH_REGIONS, pid=103}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1734371876313.5e6b04d28ec6af2428036942def2f402. 2024-12-16T17:58:07,388 ERROR [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-2 {event_type=RS_FLUSH_REGIONS, pid=103}] handler.RSProcedureHandler(58): pid=103 java.io.IOException: Unable to complete flush {ENCODED => 5e6b04d28ec6af2428036942def2f402, NAME => 'TestAcidGuarantees,,1734371876313.5e6b04d28ec6af2428036942def2f402.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-16T17:58:07,388 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-2 {event_type=RS_FLUSH_REGIONS, pid=103}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=103 java.io.IOException: Unable to complete flush {ENCODED => 5e6b04d28ec6af2428036942def2f402, NAME => 'TestAcidGuarantees,,1734371876313.5e6b04d28ec6af2428036942def2f402.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-16T17:58:07,388 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38367 {}] master.HMaster(4114): Remote procedure failed, pid=103 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 5e6b04d28ec6af2428036942def2f402, NAME => 'TestAcidGuarantees,,1734371876313.5e6b04d28ec6af2428036942def2f402.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 5e6b04d28ec6af2428036942def2f402, NAME => 'TestAcidGuarantees,,1734371876313.5e6b04d28ec6af2428036942def2f402.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-16T17:58:07,539 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 3609ad07831c,39733,1734371789085 2024-12-16T17:58:07,540 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=39733 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=103 2024-12-16T17:58:07,540 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-0 {event_type=RS_FLUSH_REGIONS, pid=103}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1734371876313.5e6b04d28ec6af2428036942def2f402. 2024-12-16T17:58:07,540 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-0 {event_type=RS_FLUSH_REGIONS, pid=103}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1734371876313.5e6b04d28ec6af2428036942def2f402. as already flushing 2024-12-16T17:58:07,540 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-0 {event_type=RS_FLUSH_REGIONS, pid=103}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1734371876313.5e6b04d28ec6af2428036942def2f402. 2024-12-16T17:58:07,540 ERROR [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-0 {event_type=RS_FLUSH_REGIONS, pid=103}] handler.RSProcedureHandler(58): pid=103 java.io.IOException: Unable to complete flush {ENCODED => 5e6b04d28ec6af2428036942def2f402, NAME => 'TestAcidGuarantees,,1734371876313.5e6b04d28ec6af2428036942def2f402.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-16T17:58:07,540 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-0 {event_type=RS_FLUSH_REGIONS, pid=103}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=103 java.io.IOException: Unable to complete flush {ENCODED => 5e6b04d28ec6af2428036942def2f402, NAME => 'TestAcidGuarantees,,1734371876313.5e6b04d28ec6af2428036942def2f402.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-16T17:58:07,541 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38367 {}] master.HMaster(4114): Remote procedure failed, pid=103 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 5e6b04d28ec6af2428036942def2f402, NAME => 'TestAcidGuarantees,,1734371876313.5e6b04d28ec6af2428036942def2f402.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 5e6b04d28ec6af2428036942def2f402, NAME => 'TestAcidGuarantees,,1734371876313.5e6b04d28ec6af2428036942def2f402.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-16T17:58:07,571 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38367 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=102 2024-12-16T17:58:07,692 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 3609ad07831c,39733,1734371789085 2024-12-16T17:58:07,692 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=39733 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=103 2024-12-16T17:58:07,692 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-1 {event_type=RS_FLUSH_REGIONS, pid=103}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1734371876313.5e6b04d28ec6af2428036942def2f402. 2024-12-16T17:58:07,693 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-1 {event_type=RS_FLUSH_REGIONS, pid=103}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1734371876313.5e6b04d28ec6af2428036942def2f402. as already flushing 2024-12-16T17:58:07,693 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-1 {event_type=RS_FLUSH_REGIONS, pid=103}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1734371876313.5e6b04d28ec6af2428036942def2f402. 2024-12-16T17:58:07,693 ERROR [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-1 {event_type=RS_FLUSH_REGIONS, pid=103}] handler.RSProcedureHandler(58): pid=103 java.io.IOException: Unable to complete flush {ENCODED => 5e6b04d28ec6af2428036942def2f402, NAME => 'TestAcidGuarantees,,1734371876313.5e6b04d28ec6af2428036942def2f402.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-16T17:58:07,693 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-1 {event_type=RS_FLUSH_REGIONS, pid=103}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=103 java.io.IOException: Unable to complete flush {ENCODED => 5e6b04d28ec6af2428036942def2f402, NAME => 'TestAcidGuarantees,,1734371876313.5e6b04d28ec6af2428036942def2f402.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] 
at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-16T17:58:07,693 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38367 {}] master.HMaster(4114): Remote procedure failed, pid=103 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 5e6b04d28ec6af2428036942def2f402, NAME => 'TestAcidGuarantees,,1734371876313.5e6b04d28ec6af2428036942def2f402.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 5e6b04d28ec6af2428036942def2f402, NAME => 'TestAcidGuarantees,,1734371876313.5e6b04d28ec6af2428036942def2f402.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-16T17:58:07,702 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39733 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5e6b04d28ec6af2428036942def2f402, server=3609ad07831c,39733,1734371789085 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-16T17:58:07,702 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39733 {}] ipc.CallRunner(138): callId: 88 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:47356 deadline: 1734371947698, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5e6b04d28ec6af2428036942def2f402, server=3609ad07831c,39733,1734371789085 2024-12-16T17:58:07,709 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39733 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5e6b04d28ec6af2428036942def2f402, server=3609ad07831c,39733,1734371789085 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-16T17:58:07,710 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39733 {}] ipc.CallRunner(138): callId: 74 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:47422 deadline: 1734371947704, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5e6b04d28ec6af2428036942def2f402, server=3609ad07831c,39733,1734371789085 2024-12-16T17:58:07,710 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39733 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5e6b04d28ec6af2428036942def2f402, server=3609ad07831c,39733,1734371789085 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-16T17:58:07,710 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39733 {}] ipc.CallRunner(138): callId: 82 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:47374 deadline: 1734371947705, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5e6b04d28ec6af2428036942def2f402, server=3609ad07831c,39733,1734371789085 2024-12-16T17:58:07,710 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39733 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5e6b04d28ec6af2428036942def2f402, server=3609ad07831c,39733,1734371789085 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-16T17:58:07,710 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39733 {}] ipc.CallRunner(138): callId: 78 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:47372 deadline: 1734371947708, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5e6b04d28ec6af2428036942def2f402, server=3609ad07831c,39733,1734371789085 2024-12-16T17:58:07,750 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=29.07 KB at sequenceid=136 (bloomFilter=true), to=hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/5e6b04d28ec6af2428036942def2f402/.tmp/B/0898b6b0f6fe4b4387a9c739002b19c1 2024-12-16T17:58:07,756 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/5e6b04d28ec6af2428036942def2f402/.tmp/C/c0af2f1513bf436f8ed595165039d51f is 50, key is test_row_0/C:col10/1734371886522/Put/seqid=0 2024-12-16T17:58:07,759 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41817 is added to blk_1073742218_1394 (size=12151) 2024-12-16T17:58:07,844 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 3609ad07831c,39733,1734371789085 2024-12-16T17:58:07,845 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=39733 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=103 2024-12-16T17:58:07,845 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-2 {event_type=RS_FLUSH_REGIONS, pid=103}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1734371876313.5e6b04d28ec6af2428036942def2f402. 2024-12-16T17:58:07,845 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-2 {event_type=RS_FLUSH_REGIONS, pid=103}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1734371876313.5e6b04d28ec6af2428036942def2f402. as already flushing 2024-12-16T17:58:07,845 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-2 {event_type=RS_FLUSH_REGIONS, pid=103}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1734371876313.5e6b04d28ec6af2428036942def2f402. 2024-12-16T17:58:07,845 ERROR [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-2 {event_type=RS_FLUSH_REGIONS, pid=103}] handler.RSProcedureHandler(58): pid=103 java.io.IOException: Unable to complete flush {ENCODED => 5e6b04d28ec6af2428036942def2f402, NAME => 'TestAcidGuarantees,,1734371876313.5e6b04d28ec6af2428036942def2f402.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-16T17:58:07,845 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-2 {event_type=RS_FLUSH_REGIONS, pid=103}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=103 java.io.IOException: Unable to complete flush {ENCODED => 5e6b04d28ec6af2428036942def2f402, NAME => 'TestAcidGuarantees,,1734371876313.5e6b04d28ec6af2428036942def2f402.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-16T17:58:07,846 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38367 {}] master.HMaster(4114): Remote procedure failed, pid=103 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 5e6b04d28ec6af2428036942def2f402, NAME => 'TestAcidGuarantees,,1734371876313.5e6b04d28ec6af2428036942def2f402.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 5e6b04d28ec6af2428036942def2f402, NAME => 'TestAcidGuarantees,,1734371876313.5e6b04d28ec6af2428036942def2f402.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-16T17:58:07,997 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 3609ad07831c,39733,1734371789085 2024-12-16T17:58:07,997 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=39733 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=103 2024-12-16T17:58:07,998 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-0 {event_type=RS_FLUSH_REGIONS, pid=103}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1734371876313.5e6b04d28ec6af2428036942def2f402. 2024-12-16T17:58:07,998 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-0 {event_type=RS_FLUSH_REGIONS, pid=103}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1734371876313.5e6b04d28ec6af2428036942def2f402. as already flushing 2024-12-16T17:58:07,998 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-0 {event_type=RS_FLUSH_REGIONS, pid=103}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1734371876313.5e6b04d28ec6af2428036942def2f402. 2024-12-16T17:58:07,998 ERROR [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-0 {event_type=RS_FLUSH_REGIONS, pid=103}] handler.RSProcedureHandler(58): pid=103 java.io.IOException: Unable to complete flush {ENCODED => 5e6b04d28ec6af2428036942def2f402, NAME => 'TestAcidGuarantees,,1734371876313.5e6b04d28ec6af2428036942def2f402.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-16T17:58:07,998 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-0 {event_type=RS_FLUSH_REGIONS, pid=103}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=103 java.io.IOException: Unable to complete flush {ENCODED => 5e6b04d28ec6af2428036942def2f402, NAME => 'TestAcidGuarantees,,1734371876313.5e6b04d28ec6af2428036942def2f402.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-16T17:58:07,998 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38367 {}] master.HMaster(4114): Remote procedure failed, pid=103 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 5e6b04d28ec6af2428036942def2f402, NAME => 'TestAcidGuarantees,,1734371876313.5e6b04d28ec6af2428036942def2f402.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 5e6b04d28ec6af2428036942def2f402, NAME => 'TestAcidGuarantees,,1734371876313.5e6b04d28ec6af2428036942def2f402.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-16T17:58:08,149 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 3609ad07831c,39733,1734371789085 2024-12-16T17:58:08,150 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=39733 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=103 2024-12-16T17:58:08,150 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-1 {event_type=RS_FLUSH_REGIONS, pid=103}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1734371876313.5e6b04d28ec6af2428036942def2f402. 2024-12-16T17:58:08,150 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-1 {event_type=RS_FLUSH_REGIONS, pid=103}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1734371876313.5e6b04d28ec6af2428036942def2f402. as already flushing 2024-12-16T17:58:08,150 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-1 {event_type=RS_FLUSH_REGIONS, pid=103}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1734371876313.5e6b04d28ec6af2428036942def2f402. 2024-12-16T17:58:08,150 ERROR [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-1 {event_type=RS_FLUSH_REGIONS, pid=103}] handler.RSProcedureHandler(58): pid=103 java.io.IOException: Unable to complete flush {ENCODED => 5e6b04d28ec6af2428036942def2f402, NAME => 'TestAcidGuarantees,,1734371876313.5e6b04d28ec6af2428036942def2f402.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-16T17:58:08,150 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-1 {event_type=RS_FLUSH_REGIONS, pid=103}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=103 java.io.IOException: Unable to complete flush {ENCODED => 5e6b04d28ec6af2428036942def2f402, NAME => 'TestAcidGuarantees,,1734371876313.5e6b04d28ec6af2428036942def2f402.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-16T17:58:08,151 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38367 {}] master.HMaster(4114): Remote procedure failed, pid=103 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 5e6b04d28ec6af2428036942def2f402, NAME => 'TestAcidGuarantees,,1734371876313.5e6b04d28ec6af2428036942def2f402.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 5e6b04d28ec6af2428036942def2f402, NAME => 'TestAcidGuarantees,,1734371876313.5e6b04d28ec6af2428036942def2f402.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-16T17:58:08,159 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=29.07 KB at sequenceid=136 (bloomFilter=true), to=hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/5e6b04d28ec6af2428036942def2f402/.tmp/C/c0af2f1513bf436f8ed595165039d51f 2024-12-16T17:58:08,163 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/5e6b04d28ec6af2428036942def2f402/.tmp/A/312b6669a37549bf8d85ee4c7c8b57da as hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/5e6b04d28ec6af2428036942def2f402/A/312b6669a37549bf8d85ee4c7c8b57da 2024-12-16T17:58:08,167 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/5e6b04d28ec6af2428036942def2f402/A/312b6669a37549bf8d85ee4c7c8b57da, entries=200, sequenceid=136, filesize=38.8 K 2024-12-16T17:58:08,167 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/5e6b04d28ec6af2428036942def2f402/.tmp/B/0898b6b0f6fe4b4387a9c739002b19c1 as hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/5e6b04d28ec6af2428036942def2f402/B/0898b6b0f6fe4b4387a9c739002b19c1 2024-12-16T17:58:08,171 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/5e6b04d28ec6af2428036942def2f402/B/0898b6b0f6fe4b4387a9c739002b19c1, entries=150, 
sequenceid=136, filesize=11.9 K 2024-12-16T17:58:08,171 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/5e6b04d28ec6af2428036942def2f402/.tmp/C/c0af2f1513bf436f8ed595165039d51f as hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/5e6b04d28ec6af2428036942def2f402/C/c0af2f1513bf436f8ed595165039d51f 2024-12-16T17:58:08,175 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/5e6b04d28ec6af2428036942def2f402/C/c0af2f1513bf436f8ed595165039d51f, entries=150, sequenceid=136, filesize=11.9 K 2024-12-16T17:58:08,176 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~87.22 KB/89310, heapSize ~229.22 KB/234720, currentSize=120.76 KB/123660 for 5e6b04d28ec6af2428036942def2f402 in 1653ms, sequenceid=136, compaction requested=true 2024-12-16T17:58:08,176 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 5e6b04d28ec6af2428036942def2f402: 2024-12-16T17:58:08,176 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 5e6b04d28ec6af2428036942def2f402:A, priority=-2147483648, current under compaction store size is 1 2024-12-16T17:58:08,176 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-16T17:58:08,176 DEBUG [RS:0;3609ad07831c:39733-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-16T17:58:08,176 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 5e6b04d28ec6af2428036942def2f402:B, priority=-2147483648, current under compaction store size is 2 2024-12-16T17:58:08,176 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-16T17:58:08,176 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 5e6b04d28ec6af2428036942def2f402:C, priority=-2147483648, current under compaction store size is 3 2024-12-16T17:58:08,176 DEBUG [RS:0;3609ad07831c:39733-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-16T17:58:08,176 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-16T17:58:08,177 DEBUG [RS:0;3609ad07831c:39733-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 101865 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-16T17:58:08,177 DEBUG [RS:0;3609ad07831c:39733-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 36359 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-16T17:58:08,177 DEBUG [RS:0;3609ad07831c:39733-shortCompactions-0 {}] 
regionserver.HStore(1540): 5e6b04d28ec6af2428036942def2f402/A is initiating minor compaction (all files) 2024-12-16T17:58:08,177 DEBUG [RS:0;3609ad07831c:39733-longCompactions-0 {}] regionserver.HStore(1540): 5e6b04d28ec6af2428036942def2f402/B is initiating minor compaction (all files) 2024-12-16T17:58:08,177 INFO [RS:0;3609ad07831c:39733-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 5e6b04d28ec6af2428036942def2f402/A in TestAcidGuarantees,,1734371876313.5e6b04d28ec6af2428036942def2f402. 2024-12-16T17:58:08,177 INFO [RS:0;3609ad07831c:39733-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 5e6b04d28ec6af2428036942def2f402/B in TestAcidGuarantees,,1734371876313.5e6b04d28ec6af2428036942def2f402. 2024-12-16T17:58:08,177 INFO [RS:0;3609ad07831c:39733-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/5e6b04d28ec6af2428036942def2f402/A/f0adba6462a54448bbcac5657e7e2978, hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/5e6b04d28ec6af2428036942def2f402/A/fac681912eeb4b139f3a8776ee5e61dd, hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/5e6b04d28ec6af2428036942def2f402/A/312b6669a37549bf8d85ee4c7c8b57da] into tmpdir=hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/5e6b04d28ec6af2428036942def2f402/.tmp, totalSize=99.5 K 2024-12-16T17:58:08,177 INFO [RS:0;3609ad07831c:39733-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/5e6b04d28ec6af2428036942def2f402/B/8d47a7a801f5421d8007a817bf4dae42, hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/5e6b04d28ec6af2428036942def2f402/B/03f75cd228fd4363950b2e374c40e59b, hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/5e6b04d28ec6af2428036942def2f402/B/0898b6b0f6fe4b4387a9c739002b19c1] into tmpdir=hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/5e6b04d28ec6af2428036942def2f402/.tmp, totalSize=35.5 K 2024-12-16T17:58:08,177 INFO [RS:0;3609ad07831c:39733-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(181): MOB compaction: major=false isAll=true priority=13 throughput controller=DefaultCompactionThroughputController [maxThroughput=50.00 MB/second, activeCompactions=0] table=TestAcidGuarantees cf=A region=TestAcidGuarantees,,1734371876313.5e6b04d28ec6af2428036942def2f402. 2024-12-16T17:58:08,177 DEBUG [RS:0;3609ad07831c:39733-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(191): MOB compaction table=TestAcidGuarantees cf=A region=TestAcidGuarantees,,1734371876313.5e6b04d28ec6af2428036942def2f402. 
files: [hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/5e6b04d28ec6af2428036942def2f402/A/f0adba6462a54448bbcac5657e7e2978, hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/5e6b04d28ec6af2428036942def2f402/A/fac681912eeb4b139f3a8776ee5e61dd, hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/5e6b04d28ec6af2428036942def2f402/A/312b6669a37549bf8d85ee4c7c8b57da] 2024-12-16T17:58:08,178 DEBUG [RS:0;3609ad07831c:39733-longCompactions-0 {}] compactions.Compactor(224): Compacting 8d47a7a801f5421d8007a817bf4dae42, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=96, earliestPutTs=1734371882124 2024-12-16T17:58:08,178 DEBUG [RS:0;3609ad07831c:39733-shortCompactions-0 {}] compactions.Compactor(224): Compacting f0adba6462a54448bbcac5657e7e2978, keycount=150, bloomtype=ROW, size=30.4 K, encoding=NONE, compression=NONE, seqNum=96, earliestPutTs=1734371882124 2024-12-16T17:58:08,178 DEBUG [RS:0;3609ad07831c:39733-shortCompactions-0 {}] compactions.Compactor(224): Compacting fac681912eeb4b139f3a8776ee5e61dd, keycount=150, bloomtype=ROW, size=30.2 K, encoding=NONE, compression=NONE, seqNum=117, earliestPutTs=1734371884271 2024-12-16T17:58:08,178 DEBUG [RS:0;3609ad07831c:39733-longCompactions-0 {}] compactions.Compactor(224): Compacting 03f75cd228fd4363950b2e374c40e59b, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=117, earliestPutTs=1734371884271 2024-12-16T17:58:08,178 DEBUG [RS:0;3609ad07831c:39733-shortCompactions-0 {}] compactions.Compactor(224): Compacting 312b6669a37549bf8d85ee4c7c8b57da, keycount=200, bloomtype=ROW, size=38.8 K, encoding=NONE, compression=NONE, seqNum=136, earliestPutTs=1734371886415 2024-12-16T17:58:08,178 DEBUG [RS:0;3609ad07831c:39733-longCompactions-0 {}] compactions.Compactor(224): Compacting 0898b6b0f6fe4b4387a9c739002b19c1, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=136, earliestPutTs=1734371886415 2024-12-16T17:58:08,184 INFO [RS:0;3609ad07831c:39733-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 5e6b04d28ec6af2428036942def2f402#B#compaction#336 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-12-16T17:58:08,184 DEBUG [RS:0;3609ad07831c:39733-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/5e6b04d28ec6af2428036942def2f402/.tmp/B/6fe9832ac64048228d31b3825a0726eb is 50, key is test_row_0/B:col10/1734371886522/Put/seqid=0 2024-12-16T17:58:08,185 INFO [RS:0;3609ad07831c:39733-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(322): Compact MOB=false optimized configured=false optimized enabled=false maximum MOB file size=1073741824 major=true store=[table=TestAcidGuarantees family=A region=5e6b04d28ec6af2428036942def2f402] 2024-12-16T17:58:08,189 DEBUG [RS:0;3609ad07831c:39733-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(626): New MOB writer created=d41d8cd98f00b204e9800998ecf8427e20241216f46c7b293a6c42a2a1a93bda6b057fb8_5e6b04d28ec6af2428036942def2f402 store=[table=TestAcidGuarantees family=A region=5e6b04d28ec6af2428036942def2f402] 2024-12-16T17:58:08,190 DEBUG [RS:0;3609ad07831c:39733-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(647): Commit or abort size=0 mobCells=0 major=true file=d41d8cd98f00b204e9800998ecf8427e20241216f46c7b293a6c42a2a1a93bda6b057fb8_5e6b04d28ec6af2428036942def2f402, store=[table=TestAcidGuarantees family=A region=5e6b04d28ec6af2428036942def2f402] 2024-12-16T17:58:08,191 DEBUG [RS:0;3609ad07831c:39733-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(658): Aborting writer for hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241216f46c7b293a6c42a2a1a93bda6b057fb8_5e6b04d28ec6af2428036942def2f402 because there are no MOB cells, store=[table=TestAcidGuarantees family=A region=5e6b04d28ec6af2428036942def2f402] 2024-12-16T17:58:08,199 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41817 is added to blk_1073742219_1395 (size=12459) 2024-12-16T17:58:08,201 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41817 is added to blk_1073742220_1396 (size=4469) 2024-12-16T17:58:08,302 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 3609ad07831c,39733,1734371789085 2024-12-16T17:58:08,302 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=39733 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=103 2024-12-16T17:58:08,302 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-2 {event_type=RS_FLUSH_REGIONS, pid=103}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1734371876313.5e6b04d28ec6af2428036942def2f402. 
2024-12-16T17:58:08,302 INFO [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-2 {event_type=RS_FLUSH_REGIONS, pid=103}] regionserver.HRegion(2837): Flushing 5e6b04d28ec6af2428036942def2f402 3/3 column families, dataSize=120.76 KB heapSize=317.16 KB 2024-12-16T17:58:08,303 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-2 {event_type=RS_FLUSH_REGIONS, pid=103}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 5e6b04d28ec6af2428036942def2f402, store=A 2024-12-16T17:58:08,303 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-2 {event_type=RS_FLUSH_REGIONS, pid=103}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-16T17:58:08,303 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-2 {event_type=RS_FLUSH_REGIONS, pid=103}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 5e6b04d28ec6af2428036942def2f402, store=B 2024-12-16T17:58:08,303 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-2 {event_type=RS_FLUSH_REGIONS, pid=103}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-16T17:58:08,303 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-2 {event_type=RS_FLUSH_REGIONS, pid=103}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 5e6b04d28ec6af2428036942def2f402, store=C 2024-12-16T17:58:08,303 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-2 {event_type=RS_FLUSH_REGIONS, pid=103}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-16T17:58:08,307 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-2 {event_type=RS_FLUSH_REGIONS, pid=103}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241216b8da11a678514ad390cf62eb042afe11_5e6b04d28ec6af2428036942def2f402 is 50, key is test_row_0/A:col10/1734371886585/Put/seqid=0 2024-12-16T17:58:08,311 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41817 is added to blk_1073742221_1397 (size=12304) 2024-12-16T17:58:08,444 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1734371876313.5e6b04d28ec6af2428036942def2f402. as already flushing 2024-12-16T17:58:08,444 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39733 {}] regionserver.HRegion(8581): Flush requested on 5e6b04d28ec6af2428036942def2f402 2024-12-16T17:58:08,507 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39733 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5e6b04d28ec6af2428036942def2f402, server=3609ad07831c,39733,1734371789085 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-16T17:58:08,507 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39733 {}] ipc.CallRunner(138): callId: 75 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:47436 deadline: 1734371948503, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5e6b04d28ec6af2428036942def2f402, server=3609ad07831c,39733,1734371789085 2024-12-16T17:58:08,572 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38367 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=102 2024-12-16T17:58:08,602 INFO [RS:0;3609ad07831c:39733-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 5e6b04d28ec6af2428036942def2f402#A#compaction#337 average throughput is 0.06 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-12-16T17:58:08,603 DEBUG [RS:0;3609ad07831c:39733-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/5e6b04d28ec6af2428036942def2f402/.tmp/A/c47d3a1292c845c790b0719bf3c8aeb5 is 175, key is test_row_0/A:col10/1734371886522/Put/seqid=0 2024-12-16T17:58:08,603 DEBUG [RS:0;3609ad07831c:39733-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/5e6b04d28ec6af2428036942def2f402/.tmp/B/6fe9832ac64048228d31b3825a0726eb as hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/5e6b04d28ec6af2428036942def2f402/B/6fe9832ac64048228d31b3825a0726eb 2024-12-16T17:58:08,607 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41817 is added to blk_1073742222_1398 (size=31413) 2024-12-16T17:58:08,608 INFO [RS:0;3609ad07831c:39733-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 5e6b04d28ec6af2428036942def2f402/B of 5e6b04d28ec6af2428036942def2f402 into 6fe9832ac64048228d31b3825a0726eb(size=12.2 K), total size for store is 12.2 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-12-16T17:58:08,608 DEBUG [RS:0;3609ad07831c:39733-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 5e6b04d28ec6af2428036942def2f402: 2024-12-16T17:58:08,608 INFO [RS:0;3609ad07831c:39733-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1734371876313.5e6b04d28ec6af2428036942def2f402., storeName=5e6b04d28ec6af2428036942def2f402/B, priority=13, startTime=1734371888176; duration=0sec 2024-12-16T17:58:08,608 DEBUG [RS:0;3609ad07831c:39733-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-16T17:58:08,608 DEBUG [RS:0;3609ad07831c:39733-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 5e6b04d28ec6af2428036942def2f402:B 2024-12-16T17:58:08,608 DEBUG [RS:0;3609ad07831c:39733-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-16T17:58:08,609 DEBUG [RS:0;3609ad07831c:39733-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 36359 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-16T17:58:08,609 DEBUG [RS:0;3609ad07831c:39733-longCompactions-0 {}] regionserver.HStore(1540): 5e6b04d28ec6af2428036942def2f402/C is initiating minor compaction (all files) 2024-12-16T17:58:08,609 INFO [RS:0;3609ad07831c:39733-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 5e6b04d28ec6af2428036942def2f402/C in TestAcidGuarantees,,1734371876313.5e6b04d28ec6af2428036942def2f402. 2024-12-16T17:58:08,609 INFO [RS:0;3609ad07831c:39733-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/5e6b04d28ec6af2428036942def2f402/C/b532c1b87a8e44d98263e607d6e41058, hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/5e6b04d28ec6af2428036942def2f402/C/a451c1cba1bc42b8ac4cac6c89fc3d91, hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/5e6b04d28ec6af2428036942def2f402/C/c0af2f1513bf436f8ed595165039d51f] into tmpdir=hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/5e6b04d28ec6af2428036942def2f402/.tmp, totalSize=35.5 K 2024-12-16T17:58:08,610 DEBUG [RS:0;3609ad07831c:39733-longCompactions-0 {}] compactions.Compactor(224): Compacting b532c1b87a8e44d98263e607d6e41058, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=96, earliestPutTs=1734371882124 2024-12-16T17:58:08,611 DEBUG [RS:0;3609ad07831c:39733-longCompactions-0 {}] compactions.Compactor(224): Compacting a451c1cba1bc42b8ac4cac6c89fc3d91, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=117, earliestPutTs=1734371884271 2024-12-16T17:58:08,611 DEBUG [RS:0;3609ad07831c:39733-longCompactions-0 {}] compactions.Compactor(224): Compacting c0af2f1513bf436f8ed595165039d51f, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=136, earliestPutTs=1734371886415 2024-12-16T17:58:08,611 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39733 {}] regionserver.HRegion(5069): Region is too 
busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5e6b04d28ec6af2428036942def2f402, server=3609ad07831c,39733,1734371789085 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-16T17:58:08,612 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39733 {}] ipc.CallRunner(138): callId: 77 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:47436 deadline: 1734371948608, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5e6b04d28ec6af2428036942def2f402, server=3609ad07831c,39733,1734371789085 2024-12-16T17:58:08,613 DEBUG [RS:0;3609ad07831c:39733-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/5e6b04d28ec6af2428036942def2f402/.tmp/A/c47d3a1292c845c790b0719bf3c8aeb5 as hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/5e6b04d28ec6af2428036942def2f402/A/c47d3a1292c845c790b0719bf3c8aeb5 2024-12-16T17:58:08,616 INFO [RS:0;3609ad07831c:39733-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 5e6b04d28ec6af2428036942def2f402/A of 5e6b04d28ec6af2428036942def2f402 into c47d3a1292c845c790b0719bf3c8aeb5(size=30.7 K), total size for store is 30.7 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-12-16T17:58:08,616 DEBUG [RS:0;3609ad07831c:39733-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 5e6b04d28ec6af2428036942def2f402: 2024-12-16T17:58:08,617 INFO [RS:0;3609ad07831c:39733-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1734371876313.5e6b04d28ec6af2428036942def2f402., storeName=5e6b04d28ec6af2428036942def2f402/A, priority=13, startTime=1734371888176; duration=0sec 2024-12-16T17:58:08,617 DEBUG [RS:0;3609ad07831c:39733-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-16T17:58:08,617 DEBUG [RS:0;3609ad07831c:39733-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 5e6b04d28ec6af2428036942def2f402:A 2024-12-16T17:58:08,618 INFO [RS:0;3609ad07831c:39733-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 5e6b04d28ec6af2428036942def2f402#C#compaction#339 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-12-16T17:58:08,619 DEBUG [RS:0;3609ad07831c:39733-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/5e6b04d28ec6af2428036942def2f402/.tmp/C/24805f2fc07b4eed82aa2fcd12be72bf is 50, key is test_row_0/C:col10/1734371886522/Put/seqid=0 2024-12-16T17:58:08,626 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41817 is added to blk_1073742223_1399 (size=12459) 2024-12-16T17:58:08,712 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39733 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5e6b04d28ec6af2428036942def2f402, server=3609ad07831c,39733,1734371789085 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-16T17:58:08,712 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39733 {}] ipc.CallRunner(138): callId: 90 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:47356 deadline: 1734371948710, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5e6b04d28ec6af2428036942def2f402, server=3609ad07831c,39733,1734371789085 2024-12-16T17:58:08,712 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-2 {event_type=RS_FLUSH_REGIONS, pid=103}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-16T17:58:08,715 INFO [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-2 {event_type=RS_FLUSH_REGIONS, pid=103}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241216b8da11a678514ad390cf62eb042afe11_5e6b04d28ec6af2428036942def2f402 to hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241216b8da11a678514ad390cf62eb042afe11_5e6b04d28ec6af2428036942def2f402 2024-12-16T17:58:08,715 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-2 {event_type=RS_FLUSH_REGIONS, pid=103}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/5e6b04d28ec6af2428036942def2f402/.tmp/A/fcbe8a2e0593421ea0af3408a6114fea, store: [table=TestAcidGuarantees family=A region=5e6b04d28ec6af2428036942def2f402] 2024-12-16T17:58:08,716 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-2 {event_type=RS_FLUSH_REGIONS, pid=103}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/5e6b04d28ec6af2428036942def2f402/.tmp/A/fcbe8a2e0593421ea0af3408a6114fea is 175, key is test_row_0/A:col10/1734371886585/Put/seqid=0 2024-12-16T17:58:08,719 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39733 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5e6b04d28ec6af2428036942def2f402, server=3609ad07831c,39733,1734371789085 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-16T17:58:08,719 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39733 {}] ipc.CallRunner(138): callId: 76 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:47422 deadline: 1734371948716, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5e6b04d28ec6af2428036942def2f402, server=3609ad07831c,39733,1734371789085 2024-12-16T17:58:08,720 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41817 is added to blk_1073742224_1400 (size=31105) 2024-12-16T17:58:08,720 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39733 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5e6b04d28ec6af2428036942def2f402, server=3609ad07831c,39733,1734371789085 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-16T17:58:08,720 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39733 {}] ipc.CallRunner(138): callId: 84 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:47374 deadline: 1734371948717, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5e6b04d28ec6af2428036942def2f402, server=3609ad07831c,39733,1734371789085 2024-12-16T17:58:08,720 INFO [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-2 {event_type=RS_FLUSH_REGIONS, pid=103}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=157, memsize=40.3 K, hasBloomFilter=true, into tmp file hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/5e6b04d28ec6af2428036942def2f402/.tmp/A/fcbe8a2e0593421ea0af3408a6114fea 2024-12-16T17:58:08,720 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39733 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5e6b04d28ec6af2428036942def2f402, server=3609ad07831c,39733,1734371789085 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-16T17:58:08,720 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39733 {}] ipc.CallRunner(138): callId: 80 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:47372 deadline: 1734371948719, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5e6b04d28ec6af2428036942def2f402, server=3609ad07831c,39733,1734371789085 2024-12-16T17:58:08,727 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-2 {event_type=RS_FLUSH_REGIONS, pid=103}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/5e6b04d28ec6af2428036942def2f402/.tmp/B/f693f1fe45384cf7a40e44b320b487d1 is 50, key is test_row_0/B:col10/1734371886585/Put/seqid=0 2024-12-16T17:58:08,731 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41817 is added to blk_1073742225_1401 (size=12151) 2024-12-16T17:58:08,762 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_default_table_TestAcidGuarantees 2024-12-16T17:58:08,762 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=TableRequests_Namespace_default_table_TestAcidGuarantees Metrics about Tables on a single HBase RegionServer 2024-12-16T17:58:08,816 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39733 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5e6b04d28ec6af2428036942def2f402, server=3609ad07831c,39733,1734371789085 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-16T17:58:08,816 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39733 {}] ipc.CallRunner(138): callId: 79 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:47436 deadline: 1734371948813, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5e6b04d28ec6af2428036942def2f402, server=3609ad07831c,39733,1734371789085 2024-12-16T17:58:09,030 DEBUG [RS:0;3609ad07831c:39733-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/5e6b04d28ec6af2428036942def2f402/.tmp/C/24805f2fc07b4eed82aa2fcd12be72bf as hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/5e6b04d28ec6af2428036942def2f402/C/24805f2fc07b4eed82aa2fcd12be72bf 2024-12-16T17:58:09,033 INFO [RS:0;3609ad07831c:39733-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 5e6b04d28ec6af2428036942def2f402/C of 5e6b04d28ec6af2428036942def2f402 into 24805f2fc07b4eed82aa2fcd12be72bf(size=12.2 K), total size for store is 12.2 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-12-16T17:58:09,033 DEBUG [RS:0;3609ad07831c:39733-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 5e6b04d28ec6af2428036942def2f402: 2024-12-16T17:58:09,033 INFO [RS:0;3609ad07831c:39733-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1734371876313.5e6b04d28ec6af2428036942def2f402., storeName=5e6b04d28ec6af2428036942def2f402/C, priority=13, startTime=1734371888176; duration=0sec 2024-12-16T17:58:09,033 DEBUG [RS:0;3609ad07831c:39733-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-16T17:58:09,033 DEBUG [RS:0;3609ad07831c:39733-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 5e6b04d28ec6af2428036942def2f402:C 2024-12-16T17:58:09,121 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39733 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5e6b04d28ec6af2428036942def2f402, server=3609ad07831c,39733,1734371789085 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-16T17:58:09,121 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39733 {}] ipc.CallRunner(138): callId: 81 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:47436 deadline: 1734371949118, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5e6b04d28ec6af2428036942def2f402, server=3609ad07831c,39733,1734371789085 2024-12-16T17:58:09,132 INFO [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-2 {event_type=RS_FLUSH_REGIONS, pid=103}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=40.25 KB at sequenceid=157 (bloomFilter=true), to=hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/5e6b04d28ec6af2428036942def2f402/.tmp/B/f693f1fe45384cf7a40e44b320b487d1 2024-12-16T17:58:09,137 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-2 {event_type=RS_FLUSH_REGIONS, pid=103}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/5e6b04d28ec6af2428036942def2f402/.tmp/C/063c2529541b4ec9b3852891d8a08f0a is 50, key is test_row_0/C:col10/1734371886585/Put/seqid=0 2024-12-16T17:58:09,141 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41817 is added to blk_1073742226_1402 (size=12151) 2024-12-16T17:58:09,141 INFO [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-2 {event_type=RS_FLUSH_REGIONS, pid=103}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=40.25 KB at sequenceid=157 (bloomFilter=true), to=hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/5e6b04d28ec6af2428036942def2f402/.tmp/C/063c2529541b4ec9b3852891d8a08f0a 2024-12-16T17:58:09,148 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-2 {event_type=RS_FLUSH_REGIONS, pid=103}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/5e6b04d28ec6af2428036942def2f402/.tmp/A/fcbe8a2e0593421ea0af3408a6114fea as hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/5e6b04d28ec6af2428036942def2f402/A/fcbe8a2e0593421ea0af3408a6114fea 2024-12-16T17:58:09,152 INFO [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-2 {event_type=RS_FLUSH_REGIONS, pid=103}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/5e6b04d28ec6af2428036942def2f402/A/fcbe8a2e0593421ea0af3408a6114fea, entries=150, sequenceid=157, filesize=30.4 K 2024-12-16T17:58:09,152 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-2 
{event_type=RS_FLUSH_REGIONS, pid=103}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/5e6b04d28ec6af2428036942def2f402/.tmp/B/f693f1fe45384cf7a40e44b320b487d1 as hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/5e6b04d28ec6af2428036942def2f402/B/f693f1fe45384cf7a40e44b320b487d1 2024-12-16T17:58:09,156 INFO [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-2 {event_type=RS_FLUSH_REGIONS, pid=103}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/5e6b04d28ec6af2428036942def2f402/B/f693f1fe45384cf7a40e44b320b487d1, entries=150, sequenceid=157, filesize=11.9 K 2024-12-16T17:58:09,158 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-2 {event_type=RS_FLUSH_REGIONS, pid=103}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/5e6b04d28ec6af2428036942def2f402/.tmp/C/063c2529541b4ec9b3852891d8a08f0a as hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/5e6b04d28ec6af2428036942def2f402/C/063c2529541b4ec9b3852891d8a08f0a 2024-12-16T17:58:09,162 INFO [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-2 {event_type=RS_FLUSH_REGIONS, pid=103}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/5e6b04d28ec6af2428036942def2f402/C/063c2529541b4ec9b3852891d8a08f0a, entries=150, sequenceid=157, filesize=11.9 K 2024-12-16T17:58:09,163 INFO [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-2 {event_type=RS_FLUSH_REGIONS, pid=103}] regionserver.HRegion(3040): Finished flush of dataSize ~120.76 KB/123660, heapSize ~317.11 KB/324720, currentSize=80.51 KB/82440 for 5e6b04d28ec6af2428036942def2f402 in 861ms, sequenceid=157, compaction requested=false 2024-12-16T17:58:09,163 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-2 {event_type=RS_FLUSH_REGIONS, pid=103}] regionserver.HRegion(2538): Flush status journal for 5e6b04d28ec6af2428036942def2f402: 2024-12-16T17:58:09,163 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-2 {event_type=RS_FLUSH_REGIONS, pid=103}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1734371876313.5e6b04d28ec6af2428036942def2f402. 
2024-12-16T17:58:09,163 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-2 {event_type=RS_FLUSH_REGIONS, pid=103}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=103 2024-12-16T17:58:09,164 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38367 {}] master.HMaster(4106): Remote procedure done, pid=103 2024-12-16T17:58:09,165 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=103, resume processing ppid=102 2024-12-16T17:58:09,165 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=103, ppid=102, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 2.6950 sec 2024-12-16T17:58:09,166 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=102, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=102, table=TestAcidGuarantees in 2.6990 sec 2024-12-16T17:58:09,626 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39733 {}] regionserver.HRegion(8581): Flush requested on 5e6b04d28ec6af2428036942def2f402 2024-12-16T17:58:09,626 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 5e6b04d28ec6af2428036942def2f402 3/3 column families, dataSize=87.22 KB heapSize=229.27 KB 2024-12-16T17:58:09,626 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 5e6b04d28ec6af2428036942def2f402, store=A 2024-12-16T17:58:09,626 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-16T17:58:09,626 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 5e6b04d28ec6af2428036942def2f402, store=B 2024-12-16T17:58:09,626 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-16T17:58:09,626 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 5e6b04d28ec6af2428036942def2f402, store=C 2024-12-16T17:58:09,626 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-16T17:58:09,636 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202412160f7ab92de5e34392953c4fdba925b934_5e6b04d28ec6af2428036942def2f402 is 50, key is test_row_0/A:col10/1734371888502/Put/seqid=0 2024-12-16T17:58:09,645 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41817 is added to blk_1073742227_1403 (size=14794) 2024-12-16T17:58:09,646 DEBUG [MemStoreFlusher.0 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-16T17:58:09,649 INFO [MemStoreFlusher.0 {}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202412160f7ab92de5e34392953c4fdba925b934_5e6b04d28ec6af2428036942def2f402 to hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202412160f7ab92de5e34392953c4fdba925b934_5e6b04d28ec6af2428036942def2f402 2024-12-16T17:58:09,650 DEBUG [MemStoreFlusher.0 {}] 
mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/5e6b04d28ec6af2428036942def2f402/.tmp/A/a6c965499bc448c5b211ee6f00a40ec9, store: [table=TestAcidGuarantees family=A region=5e6b04d28ec6af2428036942def2f402] 2024-12-16T17:58:09,651 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/5e6b04d28ec6af2428036942def2f402/.tmp/A/a6c965499bc448c5b211ee6f00a40ec9 is 175, key is test_row_0/A:col10/1734371888502/Put/seqid=0 2024-12-16T17:58:09,655 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41817 is added to blk_1073742228_1404 (size=39749) 2024-12-16T17:58:09,655 INFO [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=176, memsize=29.1 K, hasBloomFilter=true, into tmp file hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/5e6b04d28ec6af2428036942def2f402/.tmp/A/a6c965499bc448c5b211ee6f00a40ec9 2024-12-16T17:58:09,663 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/5e6b04d28ec6af2428036942def2f402/.tmp/B/870a6df549f140bf8c29855aa8ec89fa is 50, key is test_row_0/B:col10/1734371888502/Put/seqid=0 2024-12-16T17:58:09,678 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41817 is added to blk_1073742229_1405 (size=12151) 2024-12-16T17:58:09,679 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=29.07 KB at sequenceid=176 (bloomFilter=true), to=hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/5e6b04d28ec6af2428036942def2f402/.tmp/B/870a6df549f140bf8c29855aa8ec89fa 2024-12-16T17:58:09,692 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/5e6b04d28ec6af2428036942def2f402/.tmp/C/0acd56e749bc4aa0abe8516f1fe4b094 is 50, key is test_row_0/C:col10/1734371888502/Put/seqid=0 2024-12-16T17:58:09,701 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41817 is added to blk_1073742230_1406 (size=12151) 2024-12-16T17:58:09,702 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=29.07 KB at sequenceid=176 (bloomFilter=true), to=hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/5e6b04d28ec6af2428036942def2f402/.tmp/C/0acd56e749bc4aa0abe8516f1fe4b094 2024-12-16T17:58:09,706 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/5e6b04d28ec6af2428036942def2f402/.tmp/A/a6c965499bc448c5b211ee6f00a40ec9 as hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/5e6b04d28ec6af2428036942def2f402/A/a6c965499bc448c5b211ee6f00a40ec9 2024-12-16T17:58:09,709 INFO [MemStoreFlusher.0 {}] 
regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/5e6b04d28ec6af2428036942def2f402/A/a6c965499bc448c5b211ee6f00a40ec9, entries=200, sequenceid=176, filesize=38.8 K 2024-12-16T17:58:09,710 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/5e6b04d28ec6af2428036942def2f402/.tmp/B/870a6df549f140bf8c29855aa8ec89fa as hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/5e6b04d28ec6af2428036942def2f402/B/870a6df549f140bf8c29855aa8ec89fa 2024-12-16T17:58:09,714 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/5e6b04d28ec6af2428036942def2f402/B/870a6df549f140bf8c29855aa8ec89fa, entries=150, sequenceid=176, filesize=11.9 K 2024-12-16T17:58:09,715 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/5e6b04d28ec6af2428036942def2f402/.tmp/C/0acd56e749bc4aa0abe8516f1fe4b094 as hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/5e6b04d28ec6af2428036942def2f402/C/0acd56e749bc4aa0abe8516f1fe4b094 2024-12-16T17:58:09,718 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/5e6b04d28ec6af2428036942def2f402/C/0acd56e749bc4aa0abe8516f1fe4b094, entries=150, sequenceid=176, filesize=11.9 K 2024-12-16T17:58:09,719 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~87.22 KB/89310, heapSize ~229.22 KB/234720, currentSize=93.93 KB/96180 for 5e6b04d28ec6af2428036942def2f402 in 93ms, sequenceid=176, compaction requested=true 2024-12-16T17:58:09,719 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 5e6b04d28ec6af2428036942def2f402: 2024-12-16T17:58:09,719 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 5e6b04d28ec6af2428036942def2f402:A, priority=-2147483648, current under compaction store size is 1 2024-12-16T17:58:09,719 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-16T17:58:09,719 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 5e6b04d28ec6af2428036942def2f402:B, priority=-2147483648, current under compaction store size is 2 2024-12-16T17:58:09,719 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-16T17:58:09,719 DEBUG [RS:0;3609ad07831c:39733-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-16T17:58:09,719 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 5e6b04d28ec6af2428036942def2f402:C, priority=-2147483648, current under compaction store 
size is 3 2024-12-16T17:58:09,719 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-16T17:58:09,719 DEBUG [RS:0;3609ad07831c:39733-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-16T17:58:09,720 DEBUG [RS:0;3609ad07831c:39733-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 36761 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-16T17:58:09,720 DEBUG [RS:0;3609ad07831c:39733-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 102267 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-16T17:58:09,720 DEBUG [RS:0;3609ad07831c:39733-longCompactions-0 {}] regionserver.HStore(1540): 5e6b04d28ec6af2428036942def2f402/B is initiating minor compaction (all files) 2024-12-16T17:58:09,720 DEBUG [RS:0;3609ad07831c:39733-shortCompactions-0 {}] regionserver.HStore(1540): 5e6b04d28ec6af2428036942def2f402/A is initiating minor compaction (all files) 2024-12-16T17:58:09,720 INFO [RS:0;3609ad07831c:39733-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 5e6b04d28ec6af2428036942def2f402/B in TestAcidGuarantees,,1734371876313.5e6b04d28ec6af2428036942def2f402. 2024-12-16T17:58:09,720 INFO [RS:0;3609ad07831c:39733-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 5e6b04d28ec6af2428036942def2f402/A in TestAcidGuarantees,,1734371876313.5e6b04d28ec6af2428036942def2f402. 
2024-12-16T17:58:09,720 INFO [RS:0;3609ad07831c:39733-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/5e6b04d28ec6af2428036942def2f402/B/6fe9832ac64048228d31b3825a0726eb, hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/5e6b04d28ec6af2428036942def2f402/B/f693f1fe45384cf7a40e44b320b487d1, hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/5e6b04d28ec6af2428036942def2f402/B/870a6df549f140bf8c29855aa8ec89fa] into tmpdir=hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/5e6b04d28ec6af2428036942def2f402/.tmp, totalSize=35.9 K 2024-12-16T17:58:09,720 INFO [RS:0;3609ad07831c:39733-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/5e6b04d28ec6af2428036942def2f402/A/c47d3a1292c845c790b0719bf3c8aeb5, hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/5e6b04d28ec6af2428036942def2f402/A/fcbe8a2e0593421ea0af3408a6114fea, hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/5e6b04d28ec6af2428036942def2f402/A/a6c965499bc448c5b211ee6f00a40ec9] into tmpdir=hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/5e6b04d28ec6af2428036942def2f402/.tmp, totalSize=99.9 K 2024-12-16T17:58:09,720 INFO [RS:0;3609ad07831c:39733-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(181): MOB compaction: major=false isAll=true priority=13 throughput controller=DefaultCompactionThroughputController [maxThroughput=50.00 MB/second, activeCompactions=0] table=TestAcidGuarantees cf=A region=TestAcidGuarantees,,1734371876313.5e6b04d28ec6af2428036942def2f402. 2024-12-16T17:58:09,720 DEBUG [RS:0;3609ad07831c:39733-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(191): MOB compaction table=TestAcidGuarantees cf=A region=TestAcidGuarantees,,1734371876313.5e6b04d28ec6af2428036942def2f402. 
files: [hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/5e6b04d28ec6af2428036942def2f402/A/c47d3a1292c845c790b0719bf3c8aeb5, hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/5e6b04d28ec6af2428036942def2f402/A/fcbe8a2e0593421ea0af3408a6114fea, hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/5e6b04d28ec6af2428036942def2f402/A/a6c965499bc448c5b211ee6f00a40ec9] 2024-12-16T17:58:09,721 DEBUG [RS:0;3609ad07831c:39733-longCompactions-0 {}] compactions.Compactor(224): Compacting 6fe9832ac64048228d31b3825a0726eb, keycount=150, bloomtype=ROW, size=12.2 K, encoding=NONE, compression=NONE, seqNum=136, earliestPutTs=1734371886415 2024-12-16T17:58:09,721 DEBUG [RS:0;3609ad07831c:39733-shortCompactions-0 {}] compactions.Compactor(224): Compacting c47d3a1292c845c790b0719bf3c8aeb5, keycount=150, bloomtype=ROW, size=30.7 K, encoding=NONE, compression=NONE, seqNum=136, earliestPutTs=1734371886415 2024-12-16T17:58:09,721 DEBUG [RS:0;3609ad07831c:39733-longCompactions-0 {}] compactions.Compactor(224): Compacting f693f1fe45384cf7a40e44b320b487d1, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=157, earliestPutTs=1734371886546 2024-12-16T17:58:09,721 DEBUG [RS:0;3609ad07831c:39733-shortCompactions-0 {}] compactions.Compactor(224): Compacting fcbe8a2e0593421ea0af3408a6114fea, keycount=150, bloomtype=ROW, size=30.4 K, encoding=NONE, compression=NONE, seqNum=157, earliestPutTs=1734371886546 2024-12-16T17:58:09,721 DEBUG [RS:0;3609ad07831c:39733-longCompactions-0 {}] compactions.Compactor(224): Compacting 870a6df549f140bf8c29855aa8ec89fa, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=176, earliestPutTs=1734371888485 2024-12-16T17:58:09,721 DEBUG [RS:0;3609ad07831c:39733-shortCompactions-0 {}] compactions.Compactor(224): Compacting a6c965499bc448c5b211ee6f00a40ec9, keycount=200, bloomtype=ROW, size=38.8 K, encoding=NONE, compression=NONE, seqNum=176, earliestPutTs=1734371888485 2024-12-16T17:58:09,722 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39733 {}] regionserver.HRegion(8581): Flush requested on 5e6b04d28ec6af2428036942def2f402 2024-12-16T17:58:09,723 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 5e6b04d28ec6af2428036942def2f402 3/3 column families, dataSize=100.63 KB heapSize=264.42 KB 2024-12-16T17:58:09,723 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 5e6b04d28ec6af2428036942def2f402, store=A 2024-12-16T17:58:09,723 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-16T17:58:09,723 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 5e6b04d28ec6af2428036942def2f402, store=B 2024-12-16T17:58:09,724 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-16T17:58:09,724 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 5e6b04d28ec6af2428036942def2f402, store=C 2024-12-16T17:58:09,724 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-16T17:58:09,734 INFO [RS:0;3609ad07831c:39733-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 
5e6b04d28ec6af2428036942def2f402#B#compaction#345 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-12-16T17:58:09,736 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202412160877023579804a7b8c88b3d6d5f95d7f_5e6b04d28ec6af2428036942def2f402 is 50, key is test_row_0/A:col10/1734371889721/Put/seqid=0 2024-12-16T17:58:09,740 INFO [RS:0;3609ad07831c:39733-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(322): Compact MOB=false optimized configured=false optimized enabled=false maximum MOB file size=1073741824 major=true store=[table=TestAcidGuarantees family=A region=5e6b04d28ec6af2428036942def2f402] 2024-12-16T17:58:09,742 DEBUG [RS:0;3609ad07831c:39733-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/5e6b04d28ec6af2428036942def2f402/.tmp/B/988924f52f2d4b9bb7bf1392875e7daf is 50, key is test_row_0/B:col10/1734371888502/Put/seqid=0 2024-12-16T17:58:09,745 DEBUG [RS:0;3609ad07831c:39733-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(626): New MOB writer created=d41d8cd98f00b204e9800998ecf8427e20241216c7114240be9741999bc737b4fd0a8441_5e6b04d28ec6af2428036942def2f402 store=[table=TestAcidGuarantees family=A region=5e6b04d28ec6af2428036942def2f402] 2024-12-16T17:58:09,748 DEBUG [RS:0;3609ad07831c:39733-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(647): Commit or abort size=0 mobCells=0 major=true file=d41d8cd98f00b204e9800998ecf8427e20241216c7114240be9741999bc737b4fd0a8441_5e6b04d28ec6af2428036942def2f402, store=[table=TestAcidGuarantees family=A region=5e6b04d28ec6af2428036942def2f402] 2024-12-16T17:58:09,748 DEBUG [RS:0;3609ad07831c:39733-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(658): Aborting writer for hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241216c7114240be9741999bc737b4fd0a8441_5e6b04d28ec6af2428036942def2f402 because there are no MOB cells, store=[table=TestAcidGuarantees family=A region=5e6b04d28ec6af2428036942def2f402] 2024-12-16T17:58:09,751 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41817 is added to blk_1073742231_1407 (size=14794) 2024-12-16T17:58:09,752 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41817 is added to blk_1073742232_1408 (size=12561) 2024-12-16T17:58:09,753 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41817 is added to blk_1073742233_1409 (size=4469) 2024-12-16T17:58:09,754 INFO [RS:0;3609ad07831c:39733-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 5e6b04d28ec6af2428036942def2f402#A#compaction#347 average throughput is 1.88 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-12-16T17:58:09,755 DEBUG [RS:0;3609ad07831c:39733-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/5e6b04d28ec6af2428036942def2f402/.tmp/A/f87d41a2049246ce8cdabe8b68011780 is 175, key is test_row_0/A:col10/1734371888502/Put/seqid=0 2024-12-16T17:58:09,758 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41817 is added to blk_1073742234_1410 (size=31515) 2024-12-16T17:58:09,831 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39733 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5e6b04d28ec6af2428036942def2f402, server=3609ad07831c,39733,1734371789085 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-16T17:58:09,831 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39733 {}] ipc.CallRunner(138): callId: 114 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:47436 deadline: 1734371949827, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5e6b04d28ec6af2428036942def2f402, server=3609ad07831c,39733,1734371789085 2024-12-16T17:58:09,935 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39733 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5e6b04d28ec6af2428036942def2f402, server=3609ad07831c,39733,1734371789085 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-16T17:58:09,935 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39733 {}] ipc.CallRunner(138): callId: 116 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:47436 deadline: 1734371949932, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5e6b04d28ec6af2428036942def2f402, server=3609ad07831c,39733,1734371789085 2024-12-16T17:58:10,139 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39733 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5e6b04d28ec6af2428036942def2f402, server=3609ad07831c,39733,1734371789085 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-16T17:58:10,139 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39733 {}] ipc.CallRunner(138): callId: 118 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:47436 deadline: 1734371950137, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5e6b04d28ec6af2428036942def2f402, server=3609ad07831c,39733,1734371789085 2024-12-16T17:58:10,152 DEBUG [MemStoreFlusher.0 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-16T17:58:10,155 INFO [MemStoreFlusher.0 {}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202412160877023579804a7b8c88b3d6d5f95d7f_5e6b04d28ec6af2428036942def2f402 to hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202412160877023579804a7b8c88b3d6d5f95d7f_5e6b04d28ec6af2428036942def2f402 2024-12-16T17:58:10,156 DEBUG [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/5e6b04d28ec6af2428036942def2f402/.tmp/A/983d516f234a4226af57f9ae117cd5f2, store: [table=TestAcidGuarantees family=A region=5e6b04d28ec6af2428036942def2f402] 2024-12-16T17:58:10,156 DEBUG [RS:0;3609ad07831c:39733-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/5e6b04d28ec6af2428036942def2f402/.tmp/B/988924f52f2d4b9bb7bf1392875e7daf as hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/5e6b04d28ec6af2428036942def2f402/B/988924f52f2d4b9bb7bf1392875e7daf 2024-12-16T17:58:10,157 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/5e6b04d28ec6af2428036942def2f402/.tmp/A/983d516f234a4226af57f9ae117cd5f2 is 175, key is test_row_0/A:col10/1734371889721/Put/seqid=0 2024-12-16T17:58:10,161 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41817 is added to blk_1073742235_1411 (size=39749) 2024-12-16T17:58:10,161 INFO [RS:0;3609ad07831c:39733-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 5e6b04d28ec6af2428036942def2f402/B of 5e6b04d28ec6af2428036942def2f402 into 988924f52f2d4b9bb7bf1392875e7daf(size=12.3 K), total size for store is 12.3 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-12-16T17:58:10,161 DEBUG [RS:0;3609ad07831c:39733-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 5e6b04d28ec6af2428036942def2f402: 2024-12-16T17:58:10,161 INFO [RS:0;3609ad07831c:39733-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1734371876313.5e6b04d28ec6af2428036942def2f402., storeName=5e6b04d28ec6af2428036942def2f402/B, priority=13, startTime=1734371889719; duration=0sec 2024-12-16T17:58:10,161 DEBUG [RS:0;3609ad07831c:39733-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-16T17:58:10,161 DEBUG [RS:0;3609ad07831c:39733-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 5e6b04d28ec6af2428036942def2f402:B 2024-12-16T17:58:10,161 DEBUG [RS:0;3609ad07831c:39733-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-16T17:58:10,162 DEBUG [RS:0;3609ad07831c:39733-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 36761 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-16T17:58:10,163 DEBUG [RS:0;3609ad07831c:39733-longCompactions-0 {}] regionserver.HStore(1540): 5e6b04d28ec6af2428036942def2f402/C is initiating minor compaction (all files) 2024-12-16T17:58:10,163 INFO [RS:0;3609ad07831c:39733-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 5e6b04d28ec6af2428036942def2f402/C in TestAcidGuarantees,,1734371876313.5e6b04d28ec6af2428036942def2f402. 2024-12-16T17:58:10,163 INFO [RS:0;3609ad07831c:39733-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/5e6b04d28ec6af2428036942def2f402/C/24805f2fc07b4eed82aa2fcd12be72bf, hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/5e6b04d28ec6af2428036942def2f402/C/063c2529541b4ec9b3852891d8a08f0a, hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/5e6b04d28ec6af2428036942def2f402/C/0acd56e749bc4aa0abe8516f1fe4b094] into tmpdir=hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/5e6b04d28ec6af2428036942def2f402/.tmp, totalSize=35.9 K 2024-12-16T17:58:10,163 DEBUG [RS:0;3609ad07831c:39733-longCompactions-0 {}] compactions.Compactor(224): Compacting 24805f2fc07b4eed82aa2fcd12be72bf, keycount=150, bloomtype=ROW, size=12.2 K, encoding=NONE, compression=NONE, seqNum=136, earliestPutTs=1734371886415 2024-12-16T17:58:10,163 DEBUG [RS:0;3609ad07831c:39733-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/5e6b04d28ec6af2428036942def2f402/.tmp/A/f87d41a2049246ce8cdabe8b68011780 as hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/5e6b04d28ec6af2428036942def2f402/A/f87d41a2049246ce8cdabe8b68011780 2024-12-16T17:58:10,163 DEBUG [RS:0;3609ad07831c:39733-longCompactions-0 {}] compactions.Compactor(224): Compacting 063c2529541b4ec9b3852891d8a08f0a, keycount=150, 
bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=157, earliestPutTs=1734371886546 2024-12-16T17:58:10,164 DEBUG [RS:0;3609ad07831c:39733-longCompactions-0 {}] compactions.Compactor(224): Compacting 0acd56e749bc4aa0abe8516f1fe4b094, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=176, earliestPutTs=1734371888485 2024-12-16T17:58:10,168 INFO [RS:0;3609ad07831c:39733-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 5e6b04d28ec6af2428036942def2f402/A of 5e6b04d28ec6af2428036942def2f402 into f87d41a2049246ce8cdabe8b68011780(size=30.8 K), total size for store is 30.8 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-12-16T17:58:10,168 DEBUG [RS:0;3609ad07831c:39733-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 5e6b04d28ec6af2428036942def2f402: 2024-12-16T17:58:10,168 INFO [RS:0;3609ad07831c:39733-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1734371876313.5e6b04d28ec6af2428036942def2f402., storeName=5e6b04d28ec6af2428036942def2f402/A, priority=13, startTime=1734371889719; duration=0sec 2024-12-16T17:58:10,168 DEBUG [RS:0;3609ad07831c:39733-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-16T17:58:10,168 DEBUG [RS:0;3609ad07831c:39733-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 5e6b04d28ec6af2428036942def2f402:A 2024-12-16T17:58:10,172 INFO [RS:0;3609ad07831c:39733-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 5e6b04d28ec6af2428036942def2f402#C#compaction#348 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-12-16T17:58:10,172 DEBUG [RS:0;3609ad07831c:39733-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/5e6b04d28ec6af2428036942def2f402/.tmp/C/957082c3e51949f9be313487f1bd0179 is 50, key is test_row_0/C:col10/1734371888502/Put/seqid=0 2024-12-16T17:58:10,177 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41817 is added to blk_1073742236_1412 (size=12561) 2024-12-16T17:58:10,445 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39733 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5e6b04d28ec6af2428036942def2f402, server=3609ad07831c,39733,1734371789085 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-16T17:58:10,445 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39733 {}] ipc.CallRunner(138): callId: 120 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:47436 deadline: 1734371950442, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5e6b04d28ec6af2428036942def2f402, server=3609ad07831c,39733,1734371789085 2024-12-16T17:58:10,561 INFO [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=194, memsize=33.5 K, hasBloomFilter=true, into tmp file hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/5e6b04d28ec6af2428036942def2f402/.tmp/A/983d516f234a4226af57f9ae117cd5f2 2024-12-16T17:58:10,573 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38367 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=102 2024-12-16T17:58:10,573 INFO [Thread-1619 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 102 completed 2024-12-16T17:58:10,574 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38367 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-12-16T17:58:10,613 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38367 {}] procedure2.ProcedureExecutor(1098): Stored pid=104, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=104, table=TestAcidGuarantees 2024-12-16T17:58:10,615 INFO [PEWorker-3 {}] procedure.FlushTableProcedure(91): pid=104, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=104, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-12-16T17:58:10,616 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38367 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=104 2024-12-16T17:58:10,617 INFO [PEWorker-3 {}] procedure.FlushTableProcedure(91): pid=104, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=104, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-12-16T17:58:10,617 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=105, ppid=104, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-12-16T17:58:10,618 DEBUG [RS:0;3609ad07831c:39733-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/5e6b04d28ec6af2428036942def2f402/.tmp/C/957082c3e51949f9be313487f1bd0179 as hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/5e6b04d28ec6af2428036942def2f402/C/957082c3e51949f9be313487f1bd0179 2024-12-16T17:58:10,623 INFO 
[RS:0;3609ad07831c:39733-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 5e6b04d28ec6af2428036942def2f402/C of 5e6b04d28ec6af2428036942def2f402 into 957082c3e51949f9be313487f1bd0179(size=12.3 K), total size for store is 12.3 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-12-16T17:58:10,623 DEBUG [RS:0;3609ad07831c:39733-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 5e6b04d28ec6af2428036942def2f402: 2024-12-16T17:58:10,623 INFO [RS:0;3609ad07831c:39733-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1734371876313.5e6b04d28ec6af2428036942def2f402., storeName=5e6b04d28ec6af2428036942def2f402/C, priority=13, startTime=1734371889719; duration=0sec 2024-12-16T17:58:10,623 DEBUG [RS:0;3609ad07831c:39733-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-16T17:58:10,623 DEBUG [RS:0;3609ad07831c:39733-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 5e6b04d28ec6af2428036942def2f402:C 2024-12-16T17:58:10,634 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/5e6b04d28ec6af2428036942def2f402/.tmp/B/179e27276dfa43b9bc007104d29e8c73 is 50, key is test_row_0/B:col10/1734371889721/Put/seqid=0 2024-12-16T17:58:10,648 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41817 is added to blk_1073742237_1413 (size=12151) 2024-12-16T17:58:10,717 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38367 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=104 2024-12-16T17:58:10,723 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39733 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5e6b04d28ec6af2428036942def2f402, server=3609ad07831c,39733,1734371789085 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-16T17:58:10,723 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39733 {}] ipc.CallRunner(138): callId: 92 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:47356 deadline: 1734371950719, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5e6b04d28ec6af2428036942def2f402, server=3609ad07831c,39733,1734371789085 2024-12-16T17:58:10,724 DEBUG [Thread-1615 {}] client.RpcRetryingCallerImpl(129): Call exception, tries=6, retries=16, started=4147 ms ago, cancelled=false, msg=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5e6b04d28ec6af2428036942def2f402, server=3609ad07831c,39733,1734371789085 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) , details=row 'test_row_1' on table 'TestAcidGuarantees' at region=TestAcidGuarantees,,1734371876313.5e6b04d28ec6af2428036942def2f402., hostname=3609ad07831c,39733,1734371789085, seqNum=5, see https://s.apache.org/timeout, exception=org.apache.hadoop.hbase.RegionTooBusyException: org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5e6b04d28ec6af2428036942def2f402, server=3609ad07831c,39733,1734371789085 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at jdk.internal.reflect.GeneratedConstructorAccessor40.newInstance(Unknown Source) at 
java.base/jdk.internal.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45) at java.base/java.lang.reflect.Constructor.newInstanceWithCaller(Constructor.java:499) at java.base/java.lang.reflect.Constructor.newInstance(Constructor.java:480) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.instantiateException(RemoteWithExtrasException.java:110) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.unwrapRemoteException(RemoteWithExtrasException.java:100) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.makeIOExceptionOfException(ProtobufUtil.java:280) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.handleRemoteException(ProtobufUtil.java:265) at org.apache.hadoop.hbase.client.RegionServerCallable.call(RegionServerCallable.java:133) at org.apache.hadoop.hbase.client.RpcRetryingCallerImpl.callWithRetries(RpcRetryingCallerImpl.java:104) at org.apache.hadoop.hbase.client.HTable.lambda$put$3(HTable.java:578) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.client.HTable.put(HTable.java:565) at org.apache.hadoop.hbase.AcidGuaranteesTestTool$AtomicityWriter.doAnAction(AcidGuaranteesTestTool.java:169) at org.apache.hadoop.hbase.MultithreadedTestUtil$RepeatingTestThread.doWork(MultithreadedTestUtil.java:149) at org.apache.hadoop.hbase.MultithreadedTestUtil$TestThread.run(MultithreadedTestUtil.java:123) Caused by: org.apache.hadoop.hbase.ipc.RemoteWithExtrasException(org.apache.hadoop.hbase.RegionTooBusyException): org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5e6b04d28ec6af2428036942def2f402, server=3609ad07831c,39733,1734371789085 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.onCallFinished(AbstractRpcClient.java:392) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.access$100(AbstractRpcClient.java:94) at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:430) at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:425) at org.apache.hadoop.hbase.ipc.Call.callComplete(Call.java:116) at org.apache.hadoop.hbase.ipc.Call.setException(Call.java:131) at org.apache.hadoop.hbase.ipc.RpcConnection.readResponse(RpcConnection.java:457) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.readResponse(NettyRpcDuplexHandler.java:125) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.channelRead(NettyRpcDuplexHandler.java:140) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at 
org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.fireChannelRead(ByteToMessageDecoder.java:346) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.channelRead(ByteToMessageDecoder.java:318) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:444) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.timeout.IdleStateHandler.channelRead(IdleStateHandler.java:289) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline$HeadContext.channelRead(DefaultChannelPipeline.java:1357) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:440) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline.fireChannelRead(DefaultChannelPipeline.java:868) at org.apache.hbase.thirdparty.io.netty.channel.nio.AbstractNioByteChannel$NioByteUnsafe.read(AbstractNioByteChannel.java:166) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKey(NioEventLoop.java:788) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeysOptimized(NioEventLoop.java:724) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeys(NioEventLoop.java:650) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:562) at org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) at org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) at org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) at java.base/java.lang.Thread.run(Thread.java:840) 2024-12-16T17:58:10,724 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39733 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5e6b04d28ec6af2428036942def2f402, server=3609ad07831c,39733,1734371789085 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-16T17:58:10,725 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39733 {}] ipc.CallRunner(138): callId: 78 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:47422 deadline: 1734371950720, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5e6b04d28ec6af2428036942def2f402, server=3609ad07831c,39733,1734371789085 2024-12-16T17:58:10,725 DEBUG [Thread-1609 {}] client.RpcRetryingCallerImpl(129): Call exception, tries=6, retries=16, started=4140 ms ago, cancelled=false, msg=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5e6b04d28ec6af2428036942def2f402, server=3609ad07831c,39733,1734371789085 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) , details=row 'test_row_0' on table 'TestAcidGuarantees' at region=TestAcidGuarantees,,1734371876313.5e6b04d28ec6af2428036942def2f402., hostname=3609ad07831c,39733,1734371789085, seqNum=5, see https://s.apache.org/timeout, exception=org.apache.hadoop.hbase.RegionTooBusyException: org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5e6b04d28ec6af2428036942def2f402, server=3609ad07831c,39733,1734371789085 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at 
org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at jdk.internal.reflect.GeneratedConstructorAccessor40.newInstance(Unknown Source) at java.base/jdk.internal.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45) at java.base/java.lang.reflect.Constructor.newInstanceWithCaller(Constructor.java:499) at java.base/java.lang.reflect.Constructor.newInstance(Constructor.java:480) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.instantiateException(RemoteWithExtrasException.java:110) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.unwrapRemoteException(RemoteWithExtrasException.java:100) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.makeIOExceptionOfException(ProtobufUtil.java:280) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.handleRemoteException(ProtobufUtil.java:265) at org.apache.hadoop.hbase.client.RegionServerCallable.call(RegionServerCallable.java:133) at org.apache.hadoop.hbase.client.RpcRetryingCallerImpl.callWithRetries(RpcRetryingCallerImpl.java:104) at org.apache.hadoop.hbase.client.HTable.lambda$put$3(HTable.java:578) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.client.HTable.put(HTable.java:565) at org.apache.hadoop.hbase.AcidGuaranteesTestTool$AtomicityWriter.doAnAction(AcidGuaranteesTestTool.java:169) at org.apache.hadoop.hbase.MultithreadedTestUtil$RepeatingTestThread.doWork(MultithreadedTestUtil.java:149) at org.apache.hadoop.hbase.MultithreadedTestUtil$TestThread.run(MultithreadedTestUtil.java:123) Caused by: org.apache.hadoop.hbase.ipc.RemoteWithExtrasException(org.apache.hadoop.hbase.RegionTooBusyException): org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5e6b04d28ec6af2428036942def2f402, server=3609ad07831c,39733,1734371789085 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.onCallFinished(AbstractRpcClient.java:392) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.access$100(AbstractRpcClient.java:94) at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:430) at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:425) at org.apache.hadoop.hbase.ipc.Call.callComplete(Call.java:116) at org.apache.hadoop.hbase.ipc.Call.setException(Call.java:131) at 
org.apache.hadoop.hbase.ipc.RpcConnection.readResponse(RpcConnection.java:457) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.readResponse(NettyRpcDuplexHandler.java:125) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.channelRead(NettyRpcDuplexHandler.java:140) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.fireChannelRead(ByteToMessageDecoder.java:346) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.channelRead(ByteToMessageDecoder.java:318) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:444) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.timeout.IdleStateHandler.channelRead(IdleStateHandler.java:289) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline$HeadContext.channelRead(DefaultChannelPipeline.java:1357) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:440) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline.fireChannelRead(DefaultChannelPipeline.java:868) at org.apache.hbase.thirdparty.io.netty.channel.nio.AbstractNioByteChannel$NioByteUnsafe.read(AbstractNioByteChannel.java:166) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKey(NioEventLoop.java:788) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeysOptimized(NioEventLoop.java:724) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeys(NioEventLoop.java:650) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:562) at org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) at org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) at org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) at java.base/java.lang.Thread.run(Thread.java:840) 2024-12-16T17:58:10,739 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39733 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5e6b04d28ec6af2428036942def2f402, server=3609ad07831c,39733,1734371789085 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-16T17:58:10,739 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39733 {}] ipc.CallRunner(138): callId: 82 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:47372 deadline: 1734371950736, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5e6b04d28ec6af2428036942def2f402, server=3609ad07831c,39733,1734371789085 2024-12-16T17:58:10,740 DEBUG [Thread-1611 {}] client.RpcRetryingCallerImpl(129): Call exception, tries=6, retries=16, started=4154 ms ago, cancelled=false, msg=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5e6b04d28ec6af2428036942def2f402, server=3609ad07831c,39733,1734371789085 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) , details=row 'test_row_1' on table 'TestAcidGuarantees' at region=TestAcidGuarantees,,1734371876313.5e6b04d28ec6af2428036942def2f402., hostname=3609ad07831c,39733,1734371789085, seqNum=5, see https://s.apache.org/timeout, exception=org.apache.hadoop.hbase.RegionTooBusyException: org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5e6b04d28ec6af2428036942def2f402, server=3609ad07831c,39733,1734371789085 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at 
org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at jdk.internal.reflect.GeneratedConstructorAccessor40.newInstance(Unknown Source) at java.base/jdk.internal.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45) at java.base/java.lang.reflect.Constructor.newInstanceWithCaller(Constructor.java:499) at java.base/java.lang.reflect.Constructor.newInstance(Constructor.java:480) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.instantiateException(RemoteWithExtrasException.java:110) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.unwrapRemoteException(RemoteWithExtrasException.java:100) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.makeIOExceptionOfException(ProtobufUtil.java:280) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.handleRemoteException(ProtobufUtil.java:265) at org.apache.hadoop.hbase.client.RegionServerCallable.call(RegionServerCallable.java:133) at org.apache.hadoop.hbase.client.RpcRetryingCallerImpl.callWithRetries(RpcRetryingCallerImpl.java:104) at org.apache.hadoop.hbase.client.HTable.lambda$put$3(HTable.java:578) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.client.HTable.put(HTable.java:565) at org.apache.hadoop.hbase.AcidGuaranteesTestTool$AtomicityWriter.doAnAction(AcidGuaranteesTestTool.java:169) at org.apache.hadoop.hbase.MultithreadedTestUtil$RepeatingTestThread.doWork(MultithreadedTestUtil.java:149) at org.apache.hadoop.hbase.MultithreadedTestUtil$TestThread.run(MultithreadedTestUtil.java:123) Caused by: org.apache.hadoop.hbase.ipc.RemoteWithExtrasException(org.apache.hadoop.hbase.RegionTooBusyException): org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5e6b04d28ec6af2428036942def2f402, server=3609ad07831c,39733,1734371789085 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.onCallFinished(AbstractRpcClient.java:392) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.access$100(AbstractRpcClient.java:94) at 
org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:430) at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:425) at org.apache.hadoop.hbase.ipc.Call.callComplete(Call.java:116) at org.apache.hadoop.hbase.ipc.Call.setException(Call.java:131) at org.apache.hadoop.hbase.ipc.RpcConnection.readResponse(RpcConnection.java:457) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.readResponse(NettyRpcDuplexHandler.java:125) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.channelRead(NettyRpcDuplexHandler.java:140) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.fireChannelRead(ByteToMessageDecoder.java:346) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.channelRead(ByteToMessageDecoder.java:318) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:444) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.timeout.IdleStateHandler.channelRead(IdleStateHandler.java:289) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline$HeadContext.channelRead(DefaultChannelPipeline.java:1357) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:440) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline.fireChannelRead(DefaultChannelPipeline.java:868) at org.apache.hbase.thirdparty.io.netty.channel.nio.AbstractNioByteChannel$NioByteUnsafe.read(AbstractNioByteChannel.java:166) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKey(NioEventLoop.java:788) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeysOptimized(NioEventLoop.java:724) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeys(NioEventLoop.java:650) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:562) at org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) at org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) at 
org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) at java.base/java.lang.Thread.run(Thread.java:840) 2024-12-16T17:58:10,741 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39733 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5e6b04d28ec6af2428036942def2f402, server=3609ad07831c,39733,1734371789085 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-16T17:58:10,741 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39733 {}] ipc.CallRunner(138): callId: 86 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:47374 deadline: 1734371950739, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5e6b04d28ec6af2428036942def2f402, server=3609ad07831c,39733,1734371789085 2024-12-16T17:58:10,741 DEBUG [Thread-1613 {}] client.RpcRetryingCallerImpl(129): Call exception, tries=6, retries=16, started=4157 ms ago, cancelled=false, msg=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5e6b04d28ec6af2428036942def2f402, server=3609ad07831c,39733,1734371789085 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) , details=row 'test_row_2' on table 'TestAcidGuarantees' at region=TestAcidGuarantees,,1734371876313.5e6b04d28ec6af2428036942def2f402., hostname=3609ad07831c,39733,1734371789085, seqNum=5, see https://s.apache.org/timeout, exception=org.apache.hadoop.hbase.RegionTooBusyException: 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5e6b04d28ec6af2428036942def2f402, server=3609ad07831c,39733,1734371789085 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at jdk.internal.reflect.GeneratedConstructorAccessor40.newInstance(Unknown Source) at java.base/jdk.internal.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45) at java.base/java.lang.reflect.Constructor.newInstanceWithCaller(Constructor.java:499) at java.base/java.lang.reflect.Constructor.newInstance(Constructor.java:480) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.instantiateException(RemoteWithExtrasException.java:110) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.unwrapRemoteException(RemoteWithExtrasException.java:100) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.makeIOExceptionOfException(ProtobufUtil.java:280) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.handleRemoteException(ProtobufUtil.java:265) at org.apache.hadoop.hbase.client.RegionServerCallable.call(RegionServerCallable.java:133) at org.apache.hadoop.hbase.client.RpcRetryingCallerImpl.callWithRetries(RpcRetryingCallerImpl.java:104) at org.apache.hadoop.hbase.client.HTable.lambda$put$3(HTable.java:578) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.client.HTable.put(HTable.java:565) at org.apache.hadoop.hbase.AcidGuaranteesTestTool$AtomicityWriter.doAnAction(AcidGuaranteesTestTool.java:169) at org.apache.hadoop.hbase.MultithreadedTestUtil$RepeatingTestThread.doWork(MultithreadedTestUtil.java:149) at org.apache.hadoop.hbase.MultithreadedTestUtil$TestThread.run(MultithreadedTestUtil.java:123) Caused by: org.apache.hadoop.hbase.ipc.RemoteWithExtrasException(org.apache.hadoop.hbase.RegionTooBusyException): org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5e6b04d28ec6af2428036942def2f402, server=3609ad07831c,39733,1734371789085 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at 
org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.onCallFinished(AbstractRpcClient.java:392) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.access$100(AbstractRpcClient.java:94) at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:430) at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:425) at org.apache.hadoop.hbase.ipc.Call.callComplete(Call.java:116) at org.apache.hadoop.hbase.ipc.Call.setException(Call.java:131) at org.apache.hadoop.hbase.ipc.RpcConnection.readResponse(RpcConnection.java:457) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.readResponse(NettyRpcDuplexHandler.java:125) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.channelRead(NettyRpcDuplexHandler.java:140) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.fireChannelRead(ByteToMessageDecoder.java:346) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.channelRead(ByteToMessageDecoder.java:318) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:444) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.timeout.IdleStateHandler.channelRead(IdleStateHandler.java:289) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline$HeadContext.channelRead(DefaultChannelPipeline.java:1357) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:440) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline.fireChannelRead(DefaultChannelPipeline.java:868) at org.apache.hbase.thirdparty.io.netty.channel.nio.AbstractNioByteChannel$NioByteUnsafe.read(AbstractNioByteChannel.java:166) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKey(NioEventLoop.java:788) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeysOptimized(NioEventLoop.java:724) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeys(NioEventLoop.java:650) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:562) at 
org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) at org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) at org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) at java.base/java.lang.Thread.run(Thread.java:840) 2024-12-16T17:58:10,769 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 3609ad07831c,39733,1734371789085 2024-12-16T17:58:10,769 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=39733 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=105 2024-12-16T17:58:10,769 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-0 {event_type=RS_FLUSH_REGIONS, pid=105}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1734371876313.5e6b04d28ec6af2428036942def2f402. 2024-12-16T17:58:10,769 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-0 {event_type=RS_FLUSH_REGIONS, pid=105}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1734371876313.5e6b04d28ec6af2428036942def2f402. as already flushing 2024-12-16T17:58:10,769 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-0 {event_type=RS_FLUSH_REGIONS, pid=105}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1734371876313.5e6b04d28ec6af2428036942def2f402. 2024-12-16T17:58:10,770 ERROR [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-0 {event_type=RS_FLUSH_REGIONS, pid=105}] handler.RSProcedureHandler(58): pid=105 java.io.IOException: Unable to complete flush {ENCODED => 5e6b04d28ec6af2428036942def2f402, NAME => 'TestAcidGuarantees,,1734371876313.5e6b04d28ec6af2428036942def2f402.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-16T17:58:10,770 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-0 {event_type=RS_FLUSH_REGIONS, pid=105}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=105 java.io.IOException: Unable to complete flush {ENCODED => 5e6b04d28ec6af2428036942def2f402, NAME => 'TestAcidGuarantees,,1734371876313.5e6b04d28ec6af2428036942def2f402.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-16T17:58:10,770 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38367 {}] master.HMaster(4114): Remote procedure failed, pid=105 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 5e6b04d28ec6af2428036942def2f402, NAME => 'TestAcidGuarantees,,1734371876313.5e6b04d28ec6af2428036942def2f402.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 5e6b04d28ec6af2428036942def2f402, NAME => 'TestAcidGuarantees,,1734371876313.5e6b04d28ec6af2428036942def2f402.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-16T17:58:10,918 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38367 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=104 2024-12-16T17:58:10,921 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 3609ad07831c,39733,1734371789085 2024-12-16T17:58:10,922 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=39733 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=105 2024-12-16T17:58:10,922 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-1 {event_type=RS_FLUSH_REGIONS, pid=105}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1734371876313.5e6b04d28ec6af2428036942def2f402. 2024-12-16T17:58:10,922 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-1 {event_type=RS_FLUSH_REGIONS, pid=105}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1734371876313.5e6b04d28ec6af2428036942def2f402. as already flushing 2024-12-16T17:58:10,922 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-1 {event_type=RS_FLUSH_REGIONS, pid=105}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1734371876313.5e6b04d28ec6af2428036942def2f402. 2024-12-16T17:58:10,922 ERROR [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-1 {event_type=RS_FLUSH_REGIONS, pid=105}] handler.RSProcedureHandler(58): pid=105 java.io.IOException: Unable to complete flush {ENCODED => 5e6b04d28ec6af2428036942def2f402, NAME => 'TestAcidGuarantees,,1734371876313.5e6b04d28ec6af2428036942def2f402.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-16T17:58:10,922 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-1 {event_type=RS_FLUSH_REGIONS, pid=105}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=105 java.io.IOException: Unable to complete flush {ENCODED => 5e6b04d28ec6af2428036942def2f402, NAME => 'TestAcidGuarantees,,1734371876313.5e6b04d28ec6af2428036942def2f402.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] 
at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-16T17:58:10,923 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38367 {}] master.HMaster(4114): Remote procedure failed, pid=105 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 5e6b04d28ec6af2428036942def2f402, NAME => 'TestAcidGuarantees,,1734371876313.5e6b04d28ec6af2428036942def2f402.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 5e6b04d28ec6af2428036942def2f402, NAME => 'TestAcidGuarantees,,1734371876313.5e6b04d28ec6af2428036942def2f402.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-16T17:58:10,951 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39733 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5e6b04d28ec6af2428036942def2f402, server=3609ad07831c,39733,1734371789085 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-16T17:58:10,951 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39733 {}] ipc.CallRunner(138): callId: 122 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:47436 deadline: 1734371950950, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5e6b04d28ec6af2428036942def2f402, server=3609ad07831c,39733,1734371789085 2024-12-16T17:58:11,049 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=33.54 KB at sequenceid=194 (bloomFilter=true), to=hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/5e6b04d28ec6af2428036942def2f402/.tmp/B/179e27276dfa43b9bc007104d29e8c73 2024-12-16T17:58:11,055 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/5e6b04d28ec6af2428036942def2f402/.tmp/C/31be58ec76ea4d5aaaa8bd6221c3908c is 50, key is test_row_0/C:col10/1734371889721/Put/seqid=0 2024-12-16T17:58:11,059 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41817 is added to blk_1073742238_1414 (size=12151) 2024-12-16T17:58:11,074 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 3609ad07831c,39733,1734371789085 2024-12-16T17:58:11,074 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=39733 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=105 2024-12-16T17:58:11,074 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-2 {event_type=RS_FLUSH_REGIONS, pid=105}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1734371876313.5e6b04d28ec6af2428036942def2f402. 2024-12-16T17:58:11,074 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-2 {event_type=RS_FLUSH_REGIONS, pid=105}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1734371876313.5e6b04d28ec6af2428036942def2f402. as already flushing 2024-12-16T17:58:11,074 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-2 {event_type=RS_FLUSH_REGIONS, pid=105}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1734371876313.5e6b04d28ec6af2428036942def2f402. 
2024-12-16T17:58:11,074 ERROR [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-2 {event_type=RS_FLUSH_REGIONS, pid=105}] handler.RSProcedureHandler(58): pid=105 java.io.IOException: Unable to complete flush {ENCODED => 5e6b04d28ec6af2428036942def2f402, NAME => 'TestAcidGuarantees,,1734371876313.5e6b04d28ec6af2428036942def2f402.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-16T17:58:11,075 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-2 {event_type=RS_FLUSH_REGIONS, pid=105}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=105 java.io.IOException: Unable to complete flush {ENCODED => 5e6b04d28ec6af2428036942def2f402, NAME => 'TestAcidGuarantees,,1734371876313.5e6b04d28ec6af2428036942def2f402.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-16T17:58:11,075 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38367 {}] master.HMaster(4114): Remote procedure failed, pid=105 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 5e6b04d28ec6af2428036942def2f402, NAME => 'TestAcidGuarantees,,1734371876313.5e6b04d28ec6af2428036942def2f402.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 5e6b04d28ec6af2428036942def2f402, NAME => 'TestAcidGuarantees,,1734371876313.5e6b04d28ec6af2428036942def2f402.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-16T17:58:11,219 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38367 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=104 2024-12-16T17:58:11,226 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 3609ad07831c,39733,1734371789085 2024-12-16T17:58:11,226 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=39733 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=105 2024-12-16T17:58:11,227 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-0 {event_type=RS_FLUSH_REGIONS, pid=105}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1734371876313.5e6b04d28ec6af2428036942def2f402. 2024-12-16T17:58:11,227 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-0 {event_type=RS_FLUSH_REGIONS, pid=105}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1734371876313.5e6b04d28ec6af2428036942def2f402. as already flushing 2024-12-16T17:58:11,227 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-0 {event_type=RS_FLUSH_REGIONS, pid=105}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1734371876313.5e6b04d28ec6af2428036942def2f402. 2024-12-16T17:58:11,227 ERROR [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-0 {event_type=RS_FLUSH_REGIONS, pid=105}] handler.RSProcedureHandler(58): pid=105 java.io.IOException: Unable to complete flush {ENCODED => 5e6b04d28ec6af2428036942def2f402, NAME => 'TestAcidGuarantees,,1734371876313.5e6b04d28ec6af2428036942def2f402.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-16T17:58:11,227 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-0 {event_type=RS_FLUSH_REGIONS, pid=105}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=105 java.io.IOException: Unable to complete flush {ENCODED => 5e6b04d28ec6af2428036942def2f402, NAME => 'TestAcidGuarantees,,1734371876313.5e6b04d28ec6af2428036942def2f402.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-16T17:58:11,227 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38367 {}] master.HMaster(4114): Remote procedure failed, pid=105 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 5e6b04d28ec6af2428036942def2f402, NAME => 'TestAcidGuarantees,,1734371876313.5e6b04d28ec6af2428036942def2f402.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 5e6b04d28ec6af2428036942def2f402, NAME => 'TestAcidGuarantees,,1734371876313.5e6b04d28ec6af2428036942def2f402.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-16T17:58:11,379 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 3609ad07831c,39733,1734371789085 2024-12-16T17:58:11,379 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=39733 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=105 2024-12-16T17:58:11,379 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-1 {event_type=RS_FLUSH_REGIONS, pid=105}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1734371876313.5e6b04d28ec6af2428036942def2f402. 2024-12-16T17:58:11,379 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-1 {event_type=RS_FLUSH_REGIONS, pid=105}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1734371876313.5e6b04d28ec6af2428036942def2f402. as already flushing 2024-12-16T17:58:11,379 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-1 {event_type=RS_FLUSH_REGIONS, pid=105}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1734371876313.5e6b04d28ec6af2428036942def2f402. 2024-12-16T17:58:11,379 ERROR [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-1 {event_type=RS_FLUSH_REGIONS, pid=105}] handler.RSProcedureHandler(58): pid=105 java.io.IOException: Unable to complete flush {ENCODED => 5e6b04d28ec6af2428036942def2f402, NAME => 'TestAcidGuarantees,,1734371876313.5e6b04d28ec6af2428036942def2f402.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-16T17:58:11,379 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-1 {event_type=RS_FLUSH_REGIONS, pid=105}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=105 java.io.IOException: Unable to complete flush {ENCODED => 5e6b04d28ec6af2428036942def2f402, NAME => 'TestAcidGuarantees,,1734371876313.5e6b04d28ec6af2428036942def2f402.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-16T17:58:11,380 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38367 {}] master.HMaster(4114): Remote procedure failed, pid=105 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 5e6b04d28ec6af2428036942def2f402, NAME => 'TestAcidGuarantees,,1734371876313.5e6b04d28ec6af2428036942def2f402.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 5e6b04d28ec6af2428036942def2f402, NAME => 'TestAcidGuarantees,,1734371876313.5e6b04d28ec6af2428036942def2f402.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-16T17:58:11,460 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=33.54 KB at sequenceid=194 (bloomFilter=true), to=hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/5e6b04d28ec6af2428036942def2f402/.tmp/C/31be58ec76ea4d5aaaa8bd6221c3908c 2024-12-16T17:58:11,464 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/5e6b04d28ec6af2428036942def2f402/.tmp/A/983d516f234a4226af57f9ae117cd5f2 as hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/5e6b04d28ec6af2428036942def2f402/A/983d516f234a4226af57f9ae117cd5f2 2024-12-16T17:58:11,467 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/5e6b04d28ec6af2428036942def2f402/A/983d516f234a4226af57f9ae117cd5f2, entries=200, sequenceid=194, filesize=38.8 K 2024-12-16T17:58:11,468 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/5e6b04d28ec6af2428036942def2f402/.tmp/B/179e27276dfa43b9bc007104d29e8c73 as hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/5e6b04d28ec6af2428036942def2f402/B/179e27276dfa43b9bc007104d29e8c73 2024-12-16T17:58:11,476 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/5e6b04d28ec6af2428036942def2f402/B/179e27276dfa43b9bc007104d29e8c73, entries=150, 
sequenceid=194, filesize=11.9 K 2024-12-16T17:58:11,477 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/5e6b04d28ec6af2428036942def2f402/.tmp/C/31be58ec76ea4d5aaaa8bd6221c3908c as hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/5e6b04d28ec6af2428036942def2f402/C/31be58ec76ea4d5aaaa8bd6221c3908c 2024-12-16T17:58:11,482 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/5e6b04d28ec6af2428036942def2f402/C/31be58ec76ea4d5aaaa8bd6221c3908c, entries=150, sequenceid=194, filesize=11.9 K 2024-12-16T17:58:11,483 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~100.63 KB/103050, heapSize ~264.38 KB/270720, currentSize=100.63 KB/103050 for 5e6b04d28ec6af2428036942def2f402 in 1760ms, sequenceid=194, compaction requested=false 2024-12-16T17:58:11,483 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 5e6b04d28ec6af2428036942def2f402: 2024-12-16T17:58:11,531 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 3609ad07831c,39733,1734371789085 2024-12-16T17:58:11,531 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=39733 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=105 2024-12-16T17:58:11,532 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-2 {event_type=RS_FLUSH_REGIONS, pid=105}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1734371876313.5e6b04d28ec6af2428036942def2f402. 
2024-12-16T17:58:11,532 INFO [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-2 {event_type=RS_FLUSH_REGIONS, pid=105}] regionserver.HRegion(2837): Flushing 5e6b04d28ec6af2428036942def2f402 3/3 column families, dataSize=100.63 KB heapSize=264.42 KB 2024-12-16T17:58:11,532 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-2 {event_type=RS_FLUSH_REGIONS, pid=105}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 5e6b04d28ec6af2428036942def2f402, store=A 2024-12-16T17:58:11,532 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-2 {event_type=RS_FLUSH_REGIONS, pid=105}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-16T17:58:11,532 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-2 {event_type=RS_FLUSH_REGIONS, pid=105}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 5e6b04d28ec6af2428036942def2f402, store=B 2024-12-16T17:58:11,532 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-2 {event_type=RS_FLUSH_REGIONS, pid=105}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-16T17:58:11,532 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-2 {event_type=RS_FLUSH_REGIONS, pid=105}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 5e6b04d28ec6af2428036942def2f402, store=C 2024-12-16T17:58:11,532 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-2 {event_type=RS_FLUSH_REGIONS, pid=105}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-16T17:58:11,537 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-2 {event_type=RS_FLUSH_REGIONS, pid=105}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202412167512855679534aa9b90ec84cce405956_5e6b04d28ec6af2428036942def2f402 is 50, key is test_row_0/A:col10/1734371889826/Put/seqid=0 2024-12-16T17:58:11,540 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41817 is added to blk_1073742239_1415 (size=12304) 2024-12-16T17:58:11,540 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-2 {event_type=RS_FLUSH_REGIONS, pid=105}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-16T17:58:11,544 INFO [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-2 {event_type=RS_FLUSH_REGIONS, pid=105}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202412167512855679534aa9b90ec84cce405956_5e6b04d28ec6af2428036942def2f402 to hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202412167512855679534aa9b90ec84cce405956_5e6b04d28ec6af2428036942def2f402 2024-12-16T17:58:11,545 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-2 {event_type=RS_FLUSH_REGIONS, pid=105}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/5e6b04d28ec6af2428036942def2f402/.tmp/A/d3bf8291f8e54edeb58d279704020c3f, store: [table=TestAcidGuarantees family=A region=5e6b04d28ec6af2428036942def2f402] 2024-12-16T17:58:11,545 
DEBUG [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-2 {event_type=RS_FLUSH_REGIONS, pid=105}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/5e6b04d28ec6af2428036942def2f402/.tmp/A/d3bf8291f8e54edeb58d279704020c3f is 175, key is test_row_0/A:col10/1734371889826/Put/seqid=0 2024-12-16T17:58:11,548 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41817 is added to blk_1073742240_1416 (size=31105) 2024-12-16T17:58:11,719 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38367 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=104 2024-12-16T17:58:11,949 INFO [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-2 {event_type=RS_FLUSH_REGIONS, pid=105}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=215, memsize=33.5 K, hasBloomFilter=true, into tmp file hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/5e6b04d28ec6af2428036942def2f402/.tmp/A/d3bf8291f8e54edeb58d279704020c3f 2024-12-16T17:58:11,956 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-2 {event_type=RS_FLUSH_REGIONS, pid=105}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/5e6b04d28ec6af2428036942def2f402/.tmp/B/b6a68f7c4b354b43aaf979a48e13cd6c is 50, key is test_row_0/B:col10/1734371889826/Put/seqid=0 2024-12-16T17:58:11,964 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1734371876313.5e6b04d28ec6af2428036942def2f402. as already flushing 2024-12-16T17:58:11,964 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39733 {}] regionserver.HRegion(8581): Flush requested on 5e6b04d28ec6af2428036942def2f402 2024-12-16T17:58:11,974 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41817 is added to blk_1073742241_1417 (size=12151) 2024-12-16T17:58:12,034 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39733 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5e6b04d28ec6af2428036942def2f402, server=3609ad07831c,39733,1734371789085 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-16T17:58:12,034 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39733 {}] ipc.CallRunner(138): callId: 139 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:47436 deadline: 1734371952029, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5e6b04d28ec6af2428036942def2f402, server=3609ad07831c,39733,1734371789085 2024-12-16T17:58:12,139 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39733 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5e6b04d28ec6af2428036942def2f402, server=3609ad07831c,39733,1734371789085 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-16T17:58:12,139 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39733 {}] ipc.CallRunner(138): callId: 141 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:47436 deadline: 1734371952135, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5e6b04d28ec6af2428036942def2f402, server=3609ad07831c,39733,1734371789085 2024-12-16T17:58:12,342 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39733 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5e6b04d28ec6af2428036942def2f402, server=3609ad07831c,39733,1734371789085 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-16T17:58:12,342 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39733 {}] ipc.CallRunner(138): callId: 143 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:47436 deadline: 1734371952340, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5e6b04d28ec6af2428036942def2f402, server=3609ad07831c,39733,1734371789085 2024-12-16T17:58:12,375 INFO [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-2 {event_type=RS_FLUSH_REGIONS, pid=105}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=33.54 KB at sequenceid=215 (bloomFilter=true), to=hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/5e6b04d28ec6af2428036942def2f402/.tmp/B/b6a68f7c4b354b43aaf979a48e13cd6c 2024-12-16T17:58:12,381 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-2 {event_type=RS_FLUSH_REGIONS, pid=105}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/5e6b04d28ec6af2428036942def2f402/.tmp/C/35048e068dd345879b293cb17eae85ef is 50, key is test_row_0/C:col10/1734371889826/Put/seqid=0 2024-12-16T17:58:12,387 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41817 is added to blk_1073742242_1418 (size=12151) 2024-12-16T17:58:12,649 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39733 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5e6b04d28ec6af2428036942def2f402, server=3609ad07831c,39733,1734371789085 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-16T17:58:12,649 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39733 {}] ipc.CallRunner(138): callId: 145 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:47436 deadline: 1734371952644, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5e6b04d28ec6af2428036942def2f402, server=3609ad07831c,39733,1734371789085 2024-12-16T17:58:12,720 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38367 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=104 2024-12-16T17:58:12,788 INFO [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-2 {event_type=RS_FLUSH_REGIONS, pid=105}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=33.54 KB at sequenceid=215 (bloomFilter=true), to=hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/5e6b04d28ec6af2428036942def2f402/.tmp/C/35048e068dd345879b293cb17eae85ef 2024-12-16T17:58:12,792 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-2 {event_type=RS_FLUSH_REGIONS, pid=105}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/5e6b04d28ec6af2428036942def2f402/.tmp/A/d3bf8291f8e54edeb58d279704020c3f as hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/5e6b04d28ec6af2428036942def2f402/A/d3bf8291f8e54edeb58d279704020c3f 2024-12-16T17:58:12,795 INFO [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-2 {event_type=RS_FLUSH_REGIONS, pid=105}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/5e6b04d28ec6af2428036942def2f402/A/d3bf8291f8e54edeb58d279704020c3f, entries=150, sequenceid=215, filesize=30.4 K 2024-12-16T17:58:12,796 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-2 {event_type=RS_FLUSH_REGIONS, pid=105}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/5e6b04d28ec6af2428036942def2f402/.tmp/B/b6a68f7c4b354b43aaf979a48e13cd6c as hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/5e6b04d28ec6af2428036942def2f402/B/b6a68f7c4b354b43aaf979a48e13cd6c 2024-12-16T17:58:12,799 INFO [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-2 {event_type=RS_FLUSH_REGIONS, pid=105}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/5e6b04d28ec6af2428036942def2f402/B/b6a68f7c4b354b43aaf979a48e13cd6c, entries=150, sequenceid=215, filesize=11.9 K 2024-12-16T17:58:12,801 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-2 {event_type=RS_FLUSH_REGIONS, 
pid=105}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/5e6b04d28ec6af2428036942def2f402/.tmp/C/35048e068dd345879b293cb17eae85ef as hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/5e6b04d28ec6af2428036942def2f402/C/35048e068dd345879b293cb17eae85ef 2024-12-16T17:58:12,804 INFO [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-2 {event_type=RS_FLUSH_REGIONS, pid=105}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/5e6b04d28ec6af2428036942def2f402/C/35048e068dd345879b293cb17eae85ef, entries=150, sequenceid=215, filesize=11.9 K 2024-12-16T17:58:12,804 INFO [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-2 {event_type=RS_FLUSH_REGIONS, pid=105}] regionserver.HRegion(3040): Finished flush of dataSize ~100.63 KB/103050, heapSize ~264.38 KB/270720, currentSize=100.63 KB/103050 for 5e6b04d28ec6af2428036942def2f402 in 1272ms, sequenceid=215, compaction requested=true 2024-12-16T17:58:12,805 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-2 {event_type=RS_FLUSH_REGIONS, pid=105}] regionserver.HRegion(2538): Flush status journal for 5e6b04d28ec6af2428036942def2f402: 2024-12-16T17:58:12,805 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-2 {event_type=RS_FLUSH_REGIONS, pid=105}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1734371876313.5e6b04d28ec6af2428036942def2f402. 2024-12-16T17:58:12,805 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-2 {event_type=RS_FLUSH_REGIONS, pid=105}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=105 2024-12-16T17:58:12,805 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38367 {}] master.HMaster(4106): Remote procedure done, pid=105 2024-12-16T17:58:12,806 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=105, resume processing ppid=104 2024-12-16T17:58:12,806 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=105, ppid=104, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 2.1880 sec 2024-12-16T17:58:12,807 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=104, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=104, table=TestAcidGuarantees in 2.2330 sec 2024-12-16T17:58:13,156 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39733 {}] regionserver.HRegion(8581): Flush requested on 5e6b04d28ec6af2428036942def2f402 2024-12-16T17:58:13,156 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 5e6b04d28ec6af2428036942def2f402 3/3 column families, dataSize=107.34 KB heapSize=282 KB 2024-12-16T17:58:13,156 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 5e6b04d28ec6af2428036942def2f402, store=A 2024-12-16T17:58:13,156 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-16T17:58:13,156 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 5e6b04d28ec6af2428036942def2f402, store=B 2024-12-16T17:58:13,156 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-16T17:58:13,156 DEBUG [MemStoreFlusher.0 {}] 
regionserver.CompactingMemStore(205): FLUSHING TO DISK 5e6b04d28ec6af2428036942def2f402, store=C 2024-12-16T17:58:13,156 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-16T17:58:13,162 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241216a33c2cae0a9241ed8c183e17cb2476b7_5e6b04d28ec6af2428036942def2f402 is 50, key is test_row_0/A:col10/1734371893155/Put/seqid=0 2024-12-16T17:58:13,170 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41817 is added to blk_1073742243_1419 (size=14794) 2024-12-16T17:58:13,217 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39733 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5e6b04d28ec6af2428036942def2f402, server=3609ad07831c,39733,1734371789085 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-16T17:58:13,217 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39733 {}] ipc.CallRunner(138): callId: 162 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:47436 deadline: 1734371953212, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5e6b04d28ec6af2428036942def2f402, server=3609ad07831c,39733,1734371789085 2024-12-16T17:58:13,320 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39733 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5e6b04d28ec6af2428036942def2f402, server=3609ad07831c,39733,1734371789085 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-16T17:58:13,321 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39733 {}] ipc.CallRunner(138): callId: 164 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:47436 deadline: 1734371953318, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5e6b04d28ec6af2428036942def2f402, server=3609ad07831c,39733,1734371789085 2024-12-16T17:58:13,525 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39733 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5e6b04d28ec6af2428036942def2f402, server=3609ad07831c,39733,1734371789085 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-16T17:58:13,526 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39733 {}] ipc.CallRunner(138): callId: 166 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:47436 deadline: 1734371953522, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5e6b04d28ec6af2428036942def2f402, server=3609ad07831c,39733,1734371789085 2024-12-16T17:58:13,571 DEBUG [MemStoreFlusher.0 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-16T17:58:13,574 INFO [MemStoreFlusher.0 {}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241216a33c2cae0a9241ed8c183e17cb2476b7_5e6b04d28ec6af2428036942def2f402 to hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241216a33c2cae0a9241ed8c183e17cb2476b7_5e6b04d28ec6af2428036942def2f402 2024-12-16T17:58:13,575 DEBUG [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/5e6b04d28ec6af2428036942def2f402/.tmp/A/52e2be065caf415097791716075815b1, store: [table=TestAcidGuarantees family=A region=5e6b04d28ec6af2428036942def2f402] 2024-12-16T17:58:13,575 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/5e6b04d28ec6af2428036942def2f402/.tmp/A/52e2be065caf415097791716075815b1 is 175, key is test_row_0/A:col10/1734371893155/Put/seqid=0 2024-12-16T17:58:13,578 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41817 is added to blk_1073742244_1420 (size=39749) 2024-12-16T17:58:13,831 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39733 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5e6b04d28ec6af2428036942def2f402, server=3609ad07831c,39733,1734371789085 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-16T17:58:13,831 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39733 {}] ipc.CallRunner(138): callId: 168 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:47436 deadline: 1734371953829, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5e6b04d28ec6af2428036942def2f402, server=3609ad07831c,39733,1734371789085 2024-12-16T17:58:13,979 INFO [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=234, memsize=35.8 K, hasBloomFilter=true, into tmp file hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/5e6b04d28ec6af2428036942def2f402/.tmp/A/52e2be065caf415097791716075815b1 2024-12-16T17:58:13,985 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/5e6b04d28ec6af2428036942def2f402/.tmp/B/0233c01804df412d90ab808e61a23bcd is 50, key is test_row_0/B:col10/1734371893155/Put/seqid=0 2024-12-16T17:58:13,989 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41817 is added to blk_1073742245_1421 (size=12151) 2024-12-16T17:58:13,990 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=35.78 KB at sequenceid=234 (bloomFilter=true), to=hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/5e6b04d28ec6af2428036942def2f402/.tmp/B/0233c01804df412d90ab808e61a23bcd 2024-12-16T17:58:13,996 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/5e6b04d28ec6af2428036942def2f402/.tmp/C/735c925dbb90443c9a5ace520f9aa630 is 50, key is test_row_0/C:col10/1734371893155/Put/seqid=0 2024-12-16T17:58:14,000 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41817 is added to blk_1073742246_1422 (size=12151) 2024-12-16T17:58:14,338 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39733 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5e6b04d28ec6af2428036942def2f402, server=3609ad07831c,39733,1734371789085 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-16T17:58:14,339 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39733 {}] ipc.CallRunner(138): callId: 170 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:47436 deadline: 1734371954336, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5e6b04d28ec6af2428036942def2f402, server=3609ad07831c,39733,1734371789085 2024-12-16T17:58:14,401 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=35.78 KB at sequenceid=234 (bloomFilter=true), to=hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/5e6b04d28ec6af2428036942def2f402/.tmp/C/735c925dbb90443c9a5ace520f9aa630 2024-12-16T17:58:14,404 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/5e6b04d28ec6af2428036942def2f402/.tmp/A/52e2be065caf415097791716075815b1 as hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/5e6b04d28ec6af2428036942def2f402/A/52e2be065caf415097791716075815b1 2024-12-16T17:58:14,407 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/5e6b04d28ec6af2428036942def2f402/A/52e2be065caf415097791716075815b1, entries=200, sequenceid=234, filesize=38.8 K 2024-12-16T17:58:14,408 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/5e6b04d28ec6af2428036942def2f402/.tmp/B/0233c01804df412d90ab808e61a23bcd as hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/5e6b04d28ec6af2428036942def2f402/B/0233c01804df412d90ab808e61a23bcd 2024-12-16T17:58:14,411 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/5e6b04d28ec6af2428036942def2f402/B/0233c01804df412d90ab808e61a23bcd, entries=150, sequenceid=234, filesize=11.9 K 2024-12-16T17:58:14,412 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing 
hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/5e6b04d28ec6af2428036942def2f402/.tmp/C/735c925dbb90443c9a5ace520f9aa630 as hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/5e6b04d28ec6af2428036942def2f402/C/735c925dbb90443c9a5ace520f9aa630 2024-12-16T17:58:14,415 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/5e6b04d28ec6af2428036942def2f402/C/735c925dbb90443c9a5ace520f9aa630, entries=150, sequenceid=234, filesize=11.9 K 2024-12-16T17:58:14,415 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~107.34 KB/109920, heapSize ~281.95 KB/288720, currentSize=93.93 KB/96180 for 5e6b04d28ec6af2428036942def2f402 in 1259ms, sequenceid=234, compaction requested=true 2024-12-16T17:58:14,416 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 5e6b04d28ec6af2428036942def2f402: 2024-12-16T17:58:14,416 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 5e6b04d28ec6af2428036942def2f402:A, priority=-2147483648, current under compaction store size is 1 2024-12-16T17:58:14,416 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-16T17:58:14,416 DEBUG [RS:0;3609ad07831c:39733-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 4 store files, 0 compacting, 4 eligible, 16 blocking 2024-12-16T17:58:14,416 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 5e6b04d28ec6af2428036942def2f402:B, priority=-2147483648, current under compaction store size is 2 2024-12-16T17:58:14,416 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-16T17:58:14,416 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 5e6b04d28ec6af2428036942def2f402:C, priority=-2147483648, current under compaction store size is 3 2024-12-16T17:58:14,416 DEBUG [RS:0;3609ad07831c:39733-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 4 store files, 0 compacting, 4 eligible, 16 blocking 2024-12-16T17:58:14,416 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-16T17:58:14,417 DEBUG [RS:0;3609ad07831c:39733-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 4 files of size 49014 starting at candidate #0 after considering 3 permutations with 3 in ratio 2024-12-16T17:58:14,417 DEBUG [RS:0;3609ad07831c:39733-longCompactions-0 {}] regionserver.HStore(1540): 5e6b04d28ec6af2428036942def2f402/B is initiating minor compaction (all files) 2024-12-16T17:58:14,417 INFO [RS:0;3609ad07831c:39733-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 5e6b04d28ec6af2428036942def2f402/B in TestAcidGuarantees,,1734371876313.5e6b04d28ec6af2428036942def2f402. 
2024-12-16T17:58:14,417 INFO [RS:0;3609ad07831c:39733-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/5e6b04d28ec6af2428036942def2f402/B/988924f52f2d4b9bb7bf1392875e7daf, hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/5e6b04d28ec6af2428036942def2f402/B/179e27276dfa43b9bc007104d29e8c73, hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/5e6b04d28ec6af2428036942def2f402/B/b6a68f7c4b354b43aaf979a48e13cd6c, hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/5e6b04d28ec6af2428036942def2f402/B/0233c01804df412d90ab808e61a23bcd] into tmpdir=hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/5e6b04d28ec6af2428036942def2f402/.tmp, totalSize=47.9 K 2024-12-16T17:58:14,417 DEBUG [RS:0;3609ad07831c:39733-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 4 files of size 142118 starting at candidate #0 after considering 3 permutations with 3 in ratio 2024-12-16T17:58:14,417 DEBUG [RS:0;3609ad07831c:39733-shortCompactions-0 {}] regionserver.HStore(1540): 5e6b04d28ec6af2428036942def2f402/A is initiating minor compaction (all files) 2024-12-16T17:58:14,417 INFO [RS:0;3609ad07831c:39733-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 5e6b04d28ec6af2428036942def2f402/A in TestAcidGuarantees,,1734371876313.5e6b04d28ec6af2428036942def2f402. 2024-12-16T17:58:14,417 INFO [RS:0;3609ad07831c:39733-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/5e6b04d28ec6af2428036942def2f402/A/f87d41a2049246ce8cdabe8b68011780, hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/5e6b04d28ec6af2428036942def2f402/A/983d516f234a4226af57f9ae117cd5f2, hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/5e6b04d28ec6af2428036942def2f402/A/d3bf8291f8e54edeb58d279704020c3f, hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/5e6b04d28ec6af2428036942def2f402/A/52e2be065caf415097791716075815b1] into tmpdir=hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/5e6b04d28ec6af2428036942def2f402/.tmp, totalSize=138.8 K 2024-12-16T17:58:14,417 DEBUG [RS:0;3609ad07831c:39733-longCompactions-0 {}] compactions.Compactor(224): Compacting 988924f52f2d4b9bb7bf1392875e7daf, keycount=150, bloomtype=ROW, size=12.3 K, encoding=NONE, compression=NONE, seqNum=176, earliestPutTs=1734371888485 2024-12-16T17:58:14,417 INFO [RS:0;3609ad07831c:39733-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(181): MOB compaction: major=false isAll=true priority=12 throughput controller=DefaultCompactionThroughputController [maxThroughput=50.00 MB/second, activeCompactions=0] table=TestAcidGuarantees cf=A region=TestAcidGuarantees,,1734371876313.5e6b04d28ec6af2428036942def2f402. 
2024-12-16T17:58:14,417 DEBUG [RS:0;3609ad07831c:39733-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(191): MOB compaction table=TestAcidGuarantees cf=A region=TestAcidGuarantees,,1734371876313.5e6b04d28ec6af2428036942def2f402. files: [hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/5e6b04d28ec6af2428036942def2f402/A/f87d41a2049246ce8cdabe8b68011780, hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/5e6b04d28ec6af2428036942def2f402/A/983d516f234a4226af57f9ae117cd5f2, hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/5e6b04d28ec6af2428036942def2f402/A/d3bf8291f8e54edeb58d279704020c3f, hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/5e6b04d28ec6af2428036942def2f402/A/52e2be065caf415097791716075815b1] 2024-12-16T17:58:14,417 DEBUG [RS:0;3609ad07831c:39733-longCompactions-0 {}] compactions.Compactor(224): Compacting 179e27276dfa43b9bc007104d29e8c73, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=194, earliestPutTs=1734371889692 2024-12-16T17:58:14,417 DEBUG [RS:0;3609ad07831c:39733-shortCompactions-0 {}] compactions.Compactor(224): Compacting f87d41a2049246ce8cdabe8b68011780, keycount=150, bloomtype=ROW, size=30.8 K, encoding=NONE, compression=NONE, seqNum=176, earliestPutTs=1734371888485 2024-12-16T17:58:14,418 DEBUG [RS:0;3609ad07831c:39733-longCompactions-0 {}] compactions.Compactor(224): Compacting b6a68f7c4b354b43aaf979a48e13cd6c, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=215, earliestPutTs=1734371889786 2024-12-16T17:58:14,418 DEBUG [RS:0;3609ad07831c:39733-shortCompactions-0 {}] compactions.Compactor(224): Compacting 983d516f234a4226af57f9ae117cd5f2, keycount=200, bloomtype=ROW, size=38.8 K, encoding=NONE, compression=NONE, seqNum=194, earliestPutTs=1734371889657 2024-12-16T17:58:14,418 DEBUG [RS:0;3609ad07831c:39733-longCompactions-0 {}] compactions.Compactor(224): Compacting 0233c01804df412d90ab808e61a23bcd, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=234, earliestPutTs=1734371891958 2024-12-16T17:58:14,418 DEBUG [RS:0;3609ad07831c:39733-shortCompactions-0 {}] compactions.Compactor(224): Compacting d3bf8291f8e54edeb58d279704020c3f, keycount=150, bloomtype=ROW, size=30.4 K, encoding=NONE, compression=NONE, seqNum=215, earliestPutTs=1734371889786 2024-12-16T17:58:14,418 DEBUG [RS:0;3609ad07831c:39733-shortCompactions-0 {}] compactions.Compactor(224): Compacting 52e2be065caf415097791716075815b1, keycount=200, bloomtype=ROW, size=38.8 K, encoding=NONE, compression=NONE, seqNum=234, earliestPutTs=1734371891958 2024-12-16T17:58:14,424 INFO [RS:0;3609ad07831c:39733-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(322): Compact MOB=false optimized configured=false optimized enabled=false maximum MOB file size=1073741824 major=true store=[table=TestAcidGuarantees family=A region=5e6b04d28ec6af2428036942def2f402] 2024-12-16T17:58:14,431 DEBUG [RS:0;3609ad07831c:39733-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(626): New MOB writer created=d41d8cd98f00b204e9800998ecf8427e20241216671c116e26a7482497e8c9dd68b9c99d_5e6b04d28ec6af2428036942def2f402 store=[table=TestAcidGuarantees family=A region=5e6b04d28ec6af2428036942def2f402] 2024-12-16T17:58:14,432 INFO [RS:0;3609ad07831c:39733-longCompactions-0 {}] 
throttle.PressureAwareThroughputController(145): 5e6b04d28ec6af2428036942def2f402#B#compaction#358 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 1 active operations remaining, total limit is 50.00 MB/second 2024-12-16T17:58:14,432 DEBUG [RS:0;3609ad07831c:39733-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(647): Commit or abort size=0 mobCells=0 major=true file=d41d8cd98f00b204e9800998ecf8427e20241216671c116e26a7482497e8c9dd68b9c99d_5e6b04d28ec6af2428036942def2f402, store=[table=TestAcidGuarantees family=A region=5e6b04d28ec6af2428036942def2f402] 2024-12-16T17:58:14,433 DEBUG [RS:0;3609ad07831c:39733-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(658): Aborting writer for hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241216671c116e26a7482497e8c9dd68b9c99d_5e6b04d28ec6af2428036942def2f402 because there are no MOB cells, store=[table=TestAcidGuarantees family=A region=5e6b04d28ec6af2428036942def2f402] 2024-12-16T17:58:14,433 DEBUG [RS:0;3609ad07831c:39733-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/5e6b04d28ec6af2428036942def2f402/.tmp/B/933988f4515349868b85d177db0515a5 is 50, key is test_row_0/B:col10/1734371893155/Put/seqid=0 2024-12-16T17:58:14,437 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41817 is added to blk_1073742247_1423 (size=4469) 2024-12-16T17:58:14,437 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41817 is added to blk_1073742248_1424 (size=12697) 2024-12-16T17:58:14,438 INFO [RS:0;3609ad07831c:39733-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 5e6b04d28ec6af2428036942def2f402#A#compaction#357 average throughput is 1.75 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-12-16T17:58:14,438 DEBUG [RS:0;3609ad07831c:39733-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/5e6b04d28ec6af2428036942def2f402/.tmp/A/836869a9272f4c4b828b47a0e869eb23 is 175, key is test_row_0/A:col10/1734371893155/Put/seqid=0 2024-12-16T17:58:14,448 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41817 is added to blk_1073742249_1425 (size=31651) 2024-12-16T17:58:14,721 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38367 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=104 2024-12-16T17:58:14,721 INFO [Thread-1619 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 104 completed 2024-12-16T17:58:14,722 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38367 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-12-16T17:58:14,722 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38367 {}] procedure2.ProcedureExecutor(1098): Stored pid=106, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=106, table=TestAcidGuarantees 2024-12-16T17:58:14,723 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38367 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=106 2024-12-16T17:58:14,723 INFO [PEWorker-5 {}] procedure.FlushTableProcedure(91): pid=106, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=106, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-12-16T17:58:14,723 INFO [PEWorker-5 {}] procedure.FlushTableProcedure(91): pid=106, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=106, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-12-16T17:58:14,724 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=107, ppid=106, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-12-16T17:58:14,731 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39733 {}] regionserver.HRegion(8581): Flush requested on 5e6b04d28ec6af2428036942def2f402 2024-12-16T17:58:14,731 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 5e6b04d28ec6af2428036942def2f402 3/3 column families, dataSize=100.63 KB heapSize=264.42 KB 2024-12-16T17:58:14,731 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 5e6b04d28ec6af2428036942def2f402, store=A 2024-12-16T17:58:14,731 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-16T17:58:14,731 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 5e6b04d28ec6af2428036942def2f402, store=B 2024-12-16T17:58:14,731 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-16T17:58:14,731 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 5e6b04d28ec6af2428036942def2f402, store=C 2024-12-16T17:58:14,732 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 
2024-12-16T17:58:14,737 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e2024121636ef38f3952340c3a69828f6f47b27b5_5e6b04d28ec6af2428036942def2f402 is 50, key is test_row_0/A:col10/1734371894730/Put/seqid=0 2024-12-16T17:58:14,740 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41817 is added to blk_1073742250_1426 (size=14794) 2024-12-16T17:58:14,767 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39733 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5e6b04d28ec6af2428036942def2f402, server=3609ad07831c,39733,1734371789085 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-16T17:58:14,768 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39733 {}] ipc.CallRunner(138): callId: 88 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:47374 deadline: 1734371954762, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5e6b04d28ec6af2428036942def2f402, server=3609ad07831c,39733,1734371789085 2024-12-16T17:58:14,768 DEBUG [Thread-1613 {}] client.RpcRetryingCallerImpl(129): Call exception, tries=7, retries=16, started=8184 ms ago, cancelled=false, msg=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5e6b04d28ec6af2428036942def2f402, server=3609ad07831c,39733,1734371789085 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at 
org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) , details=row 'test_row_2' on table 'TestAcidGuarantees' at region=TestAcidGuarantees,,1734371876313.5e6b04d28ec6af2428036942def2f402., hostname=3609ad07831c,39733,1734371789085, seqNum=5, see https://s.apache.org/timeout, exception=org.apache.hadoop.hbase.RegionTooBusyException: org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5e6b04d28ec6af2428036942def2f402, server=3609ad07831c,39733,1734371789085 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at jdk.internal.reflect.GeneratedConstructorAccessor40.newInstance(Unknown Source) at java.base/jdk.internal.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45) at java.base/java.lang.reflect.Constructor.newInstanceWithCaller(Constructor.java:499) at java.base/java.lang.reflect.Constructor.newInstance(Constructor.java:480) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.instantiateException(RemoteWithExtrasException.java:110) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.unwrapRemoteException(RemoteWithExtrasException.java:100) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.makeIOExceptionOfException(ProtobufUtil.java:280) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.handleRemoteException(ProtobufUtil.java:265) at org.apache.hadoop.hbase.client.RegionServerCallable.call(RegionServerCallable.java:133) at org.apache.hadoop.hbase.client.RpcRetryingCallerImpl.callWithRetries(RpcRetryingCallerImpl.java:104) at org.apache.hadoop.hbase.client.HTable.lambda$put$3(HTable.java:578) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.client.HTable.put(HTable.java:565) at org.apache.hadoop.hbase.AcidGuaranteesTestTool$AtomicityWriter.doAnAction(AcidGuaranteesTestTool.java:169) at org.apache.hadoop.hbase.MultithreadedTestUtil$RepeatingTestThread.doWork(MultithreadedTestUtil.java:149) at org.apache.hadoop.hbase.MultithreadedTestUtil$TestThread.run(MultithreadedTestUtil.java:123) Caused by: org.apache.hadoop.hbase.ipc.RemoteWithExtrasException(org.apache.hadoop.hbase.RegionTooBusyException): org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5e6b04d28ec6af2428036942def2f402, server=3609ad07831c,39733,1734371789085 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at 
org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.onCallFinished(AbstractRpcClient.java:392) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.access$100(AbstractRpcClient.java:94) at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:430) at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:425) at org.apache.hadoop.hbase.ipc.Call.callComplete(Call.java:116) at org.apache.hadoop.hbase.ipc.Call.setException(Call.java:131) at org.apache.hadoop.hbase.ipc.RpcConnection.readResponse(RpcConnection.java:457) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.readResponse(NettyRpcDuplexHandler.java:125) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.channelRead(NettyRpcDuplexHandler.java:140) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.fireChannelRead(ByteToMessageDecoder.java:346) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.channelRead(ByteToMessageDecoder.java:318) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:444) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.timeout.IdleStateHandler.channelRead(IdleStateHandler.java:289) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline$HeadContext.channelRead(DefaultChannelPipeline.java:1357) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:440) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline.fireChannelRead(DefaultChannelPipeline.java:868) at org.apache.hbase.thirdparty.io.netty.channel.nio.AbstractNioByteChannel$NioByteUnsafe.read(AbstractNioByteChannel.java:166) at 
org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKey(NioEventLoop.java:788) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeysOptimized(NioEventLoop.java:724) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeys(NioEventLoop.java:650) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:562) at org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) at org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) at org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) at java.base/java.lang.Thread.run(Thread.java:840) 2024-12-16T17:58:14,769 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39733 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5e6b04d28ec6af2428036942def2f402, server=3609ad07831c,39733,1734371789085 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-16T17:58:14,769 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39733 {}] ipc.CallRunner(138): callId: 100 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:47356 deadline: 1734371954766, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5e6b04d28ec6af2428036942def2f402, server=3609ad07831c,39733,1734371789085 2024-12-16T17:58:14,771 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39733 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5e6b04d28ec6af2428036942def2f402, server=3609ad07831c,39733,1734371789085 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-16T17:58:14,771 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39733 {}] ipc.CallRunner(138): callId: 88 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:47422 deadline: 1734371954767, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5e6b04d28ec6af2428036942def2f402, server=3609ad07831c,39733,1734371789085 2024-12-16T17:58:14,772 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39733 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5e6b04d28ec6af2428036942def2f402, server=3609ad07831c,39733,1734371789085 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-16T17:58:14,772 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39733 {}] ipc.CallRunner(138): callId: 86 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:47372 deadline: 1734371954767, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5e6b04d28ec6af2428036942def2f402, server=3609ad07831c,39733,1734371789085 2024-12-16T17:58:14,823 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38367 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=106 2024-12-16T17:58:14,841 DEBUG [RS:0;3609ad07831c:39733-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/5e6b04d28ec6af2428036942def2f402/.tmp/B/933988f4515349868b85d177db0515a5 as hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/5e6b04d28ec6af2428036942def2f402/B/933988f4515349868b85d177db0515a5 2024-12-16T17:58:14,848 INFO [RS:0;3609ad07831c:39733-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 4 (all) file(s) in 5e6b04d28ec6af2428036942def2f402/B of 5e6b04d28ec6af2428036942def2f402 into 933988f4515349868b85d177db0515a5(size=12.4 K), total size for store is 12.4 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-12-16T17:58:14,848 DEBUG [RS:0;3609ad07831c:39733-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 5e6b04d28ec6af2428036942def2f402: 2024-12-16T17:58:14,848 INFO [RS:0;3609ad07831c:39733-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1734371876313.5e6b04d28ec6af2428036942def2f402., storeName=5e6b04d28ec6af2428036942def2f402/B, priority=12, startTime=1734371894416; duration=0sec 2024-12-16T17:58:14,848 DEBUG [RS:0;3609ad07831c:39733-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-16T17:58:14,848 DEBUG [RS:0;3609ad07831c:39733-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 5e6b04d28ec6af2428036942def2f402:B 2024-12-16T17:58:14,848 DEBUG [RS:0;3609ad07831c:39733-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 4 store files, 0 compacting, 4 eligible, 16 blocking 2024-12-16T17:58:14,849 DEBUG [RS:0;3609ad07831c:39733-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 4 files of size 49014 starting at candidate #0 after considering 3 permutations with 3 in ratio 2024-12-16T17:58:14,849 DEBUG [RS:0;3609ad07831c:39733-longCompactions-0 {}] regionserver.HStore(1540): 5e6b04d28ec6af2428036942def2f402/C is initiating minor compaction (all files) 2024-12-16T17:58:14,849 INFO [RS:0;3609ad07831c:39733-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 5e6b04d28ec6af2428036942def2f402/C in TestAcidGuarantees,,1734371876313.5e6b04d28ec6af2428036942def2f402. 2024-12-16T17:58:14,849 INFO [RS:0;3609ad07831c:39733-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/5e6b04d28ec6af2428036942def2f402/C/957082c3e51949f9be313487f1bd0179, hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/5e6b04d28ec6af2428036942def2f402/C/31be58ec76ea4d5aaaa8bd6221c3908c, hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/5e6b04d28ec6af2428036942def2f402/C/35048e068dd345879b293cb17eae85ef, hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/5e6b04d28ec6af2428036942def2f402/C/735c925dbb90443c9a5ace520f9aa630] into tmpdir=hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/5e6b04d28ec6af2428036942def2f402/.tmp, totalSize=47.9 K 2024-12-16T17:58:14,850 DEBUG [RS:0;3609ad07831c:39733-longCompactions-0 {}] compactions.Compactor(224): Compacting 957082c3e51949f9be313487f1bd0179, keycount=150, bloomtype=ROW, size=12.3 K, encoding=NONE, compression=NONE, seqNum=176, earliestPutTs=1734371888485 2024-12-16T17:58:14,850 DEBUG [RS:0;3609ad07831c:39733-longCompactions-0 {}] compactions.Compactor(224): Compacting 31be58ec76ea4d5aaaa8bd6221c3908c, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=194, earliestPutTs=1734371889692 2024-12-16T17:58:14,851 DEBUG [RS:0;3609ad07831c:39733-longCompactions-0 {}] compactions.Compactor(224): Compacting 35048e068dd345879b293cb17eae85ef, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, 
compression=NONE, seqNum=215, earliestPutTs=1734371889786 2024-12-16T17:58:14,851 DEBUG [RS:0;3609ad07831c:39733-longCompactions-0 {}] compactions.Compactor(224): Compacting 735c925dbb90443c9a5ace520f9aa630, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=234, earliestPutTs=1734371891958 2024-12-16T17:58:14,852 DEBUG [RS:0;3609ad07831c:39733-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/5e6b04d28ec6af2428036942def2f402/.tmp/A/836869a9272f4c4b828b47a0e869eb23 as hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/5e6b04d28ec6af2428036942def2f402/A/836869a9272f4c4b828b47a0e869eb23 2024-12-16T17:58:14,856 INFO [RS:0;3609ad07831c:39733-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 4 (all) file(s) in 5e6b04d28ec6af2428036942def2f402/A of 5e6b04d28ec6af2428036942def2f402 into 836869a9272f4c4b828b47a0e869eb23(size=30.9 K), total size for store is 30.9 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-12-16T17:58:14,856 DEBUG [RS:0;3609ad07831c:39733-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 5e6b04d28ec6af2428036942def2f402: 2024-12-16T17:58:14,856 INFO [RS:0;3609ad07831c:39733-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1734371876313.5e6b04d28ec6af2428036942def2f402., storeName=5e6b04d28ec6af2428036942def2f402/A, priority=12, startTime=1734371894416; duration=0sec 2024-12-16T17:58:14,856 DEBUG [RS:0;3609ad07831c:39733-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-16T17:58:14,856 DEBUG [RS:0;3609ad07831c:39733-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 5e6b04d28ec6af2428036942def2f402:A 2024-12-16T17:58:14,859 INFO [RS:0;3609ad07831c:39733-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 5e6b04d28ec6af2428036942def2f402#C#compaction#360 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-12-16T17:58:14,859 DEBUG [RS:0;3609ad07831c:39733-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/5e6b04d28ec6af2428036942def2f402/.tmp/C/d29adca1b2a445efaa29229d700d50f4 is 50, key is test_row_0/C:col10/1734371893155/Put/seqid=0 2024-12-16T17:58:14,865 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41817 is added to blk_1073742251_1427 (size=12697) 2024-12-16T17:58:14,870 DEBUG [RS:0;3609ad07831c:39733-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/5e6b04d28ec6af2428036942def2f402/.tmp/C/d29adca1b2a445efaa29229d700d50f4 as hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/5e6b04d28ec6af2428036942def2f402/C/d29adca1b2a445efaa29229d700d50f4 2024-12-16T17:58:14,872 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39733 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5e6b04d28ec6af2428036942def2f402, server=3609ad07831c,39733,1734371789085 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-16T17:58:14,872 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39733 {}] ipc.CallRunner(138): callId: 102 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:47356 deadline: 1734371954870, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5e6b04d28ec6af2428036942def2f402, server=3609ad07831c,39733,1734371789085 2024-12-16T17:58:14,874 INFO [RS:0;3609ad07831c:39733-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 4 (all) file(s) in 5e6b04d28ec6af2428036942def2f402/C of 5e6b04d28ec6af2428036942def2f402 into d29adca1b2a445efaa29229d700d50f4(size=12.4 K), total size for store is 12.4 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-12-16T17:58:14,874 DEBUG [RS:0;3609ad07831c:39733-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 5e6b04d28ec6af2428036942def2f402: 2024-12-16T17:58:14,874 INFO [RS:0;3609ad07831c:39733-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1734371876313.5e6b04d28ec6af2428036942def2f402., storeName=5e6b04d28ec6af2428036942def2f402/C, priority=12, startTime=1734371894416; duration=0sec 2024-12-16T17:58:14,874 DEBUG [RS:0;3609ad07831c:39733-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-16T17:58:14,874 DEBUG [RS:0;3609ad07831c:39733-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 5e6b04d28ec6af2428036942def2f402:C 2024-12-16T17:58:14,875 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 3609ad07831c,39733,1734371789085 2024-12-16T17:58:14,875 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=39733 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=107 2024-12-16T17:58:14,875 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39733 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5e6b04d28ec6af2428036942def2f402, server=3609ad07831c,39733,1734371789085 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-16T17:58:14,875 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-0 {event_type=RS_FLUSH_REGIONS, pid=107}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1734371876313.5e6b04d28ec6af2428036942def2f402. 
2024-12-16T17:58:14,875 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39733 {}] ipc.CallRunner(138): callId: 90 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:47422 deadline: 1734371954872, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5e6b04d28ec6af2428036942def2f402, server=3609ad07831c,39733,1734371789085 2024-12-16T17:58:14,875 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-0 {event_type=RS_FLUSH_REGIONS, pid=107}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1734371876313.5e6b04d28ec6af2428036942def2f402. as already flushing 2024-12-16T17:58:14,875 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-0 {event_type=RS_FLUSH_REGIONS, pid=107}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1734371876313.5e6b04d28ec6af2428036942def2f402. 2024-12-16T17:58:14,875 ERROR [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-0 {event_type=RS_FLUSH_REGIONS, pid=107}] handler.RSProcedureHandler(58): pid=107 java.io.IOException: Unable to complete flush {ENCODED => 5e6b04d28ec6af2428036942def2f402, NAME => 'TestAcidGuarantees,,1734371876313.5e6b04d28ec6af2428036942def2f402.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-16T17:58:14,875 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-0 {event_type=RS_FLUSH_REGIONS, pid=107}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=107 java.io.IOException: Unable to complete flush {ENCODED => 5e6b04d28ec6af2428036942def2f402, NAME => 'TestAcidGuarantees,,1734371876313.5e6b04d28ec6af2428036942def2f402.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-16T17:58:14,876 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38367 {}] master.HMaster(4114): Remote procedure failed, pid=107 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 5e6b04d28ec6af2428036942def2f402, NAME => 'TestAcidGuarantees,,1734371876313.5e6b04d28ec6af2428036942def2f402.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 5e6b04d28ec6af2428036942def2f402, NAME => 'TestAcidGuarantees,,1734371876313.5e6b04d28ec6af2428036942def2f402.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-16T17:58:14,878 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39733 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5e6b04d28ec6af2428036942def2f402, server=3609ad07831c,39733,1734371789085 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-16T17:58:14,878 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39733 {}] ipc.CallRunner(138): callId: 88 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:47372 deadline: 1734371954873, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5e6b04d28ec6af2428036942def2f402, server=3609ad07831c,39733,1734371789085 2024-12-16T17:58:15,024 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38367 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=106 2024-12-16T17:58:15,027 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 3609ad07831c,39733,1734371789085 2024-12-16T17:58:15,027 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=39733 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=107 2024-12-16T17:58:15,027 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-1 {event_type=RS_FLUSH_REGIONS, pid=107}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1734371876313.5e6b04d28ec6af2428036942def2f402. 2024-12-16T17:58:15,028 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-1 {event_type=RS_FLUSH_REGIONS, pid=107}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1734371876313.5e6b04d28ec6af2428036942def2f402. as already flushing 2024-12-16T17:58:15,028 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-1 {event_type=RS_FLUSH_REGIONS, pid=107}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1734371876313.5e6b04d28ec6af2428036942def2f402. 2024-12-16T17:58:15,028 ERROR [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-1 {event_type=RS_FLUSH_REGIONS, pid=107}] handler.RSProcedureHandler(58): pid=107 java.io.IOException: Unable to complete flush {ENCODED => 5e6b04d28ec6af2428036942def2f402, NAME => 'TestAcidGuarantees,,1734371876313.5e6b04d28ec6af2428036942def2f402.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-16T17:58:15,028 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-1 {event_type=RS_FLUSH_REGIONS, pid=107}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=107 java.io.IOException: Unable to complete flush {ENCODED => 5e6b04d28ec6af2428036942def2f402, NAME => 'TestAcidGuarantees,,1734371876313.5e6b04d28ec6af2428036942def2f402.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-16T17:58:15,028 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38367 {}] master.HMaster(4114): Remote procedure failed, pid=107 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 5e6b04d28ec6af2428036942def2f402, NAME => 'TestAcidGuarantees,,1734371876313.5e6b04d28ec6af2428036942def2f402.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 5e6b04d28ec6af2428036942def2f402, NAME => 'TestAcidGuarantees,,1734371876313.5e6b04d28ec6af2428036942def2f402.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-16T17:58:15,075 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39733 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5e6b04d28ec6af2428036942def2f402, server=3609ad07831c,39733,1734371789085 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-16T17:58:15,075 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39733 {}] ipc.CallRunner(138): callId: 104 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:47356 deadline: 1734371955074, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5e6b04d28ec6af2428036942def2f402, server=3609ad07831c,39733,1734371789085 2024-12-16T17:58:15,079 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39733 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5e6b04d28ec6af2428036942def2f402, server=3609ad07831c,39733,1734371789085 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-16T17:58:15,079 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39733 {}] ipc.CallRunner(138): callId: 92 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:47422 deadline: 1734371955077, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5e6b04d28ec6af2428036942def2f402, server=3609ad07831c,39733,1734371789085 2024-12-16T17:58:15,081 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39733 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5e6b04d28ec6af2428036942def2f402, server=3609ad07831c,39733,1734371789085 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-16T17:58:15,081 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39733 {}] ipc.CallRunner(138): callId: 90 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:47372 deadline: 1734371955080, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5e6b04d28ec6af2428036942def2f402, server=3609ad07831c,39733,1734371789085 2024-12-16T17:58:15,140 DEBUG [MemStoreFlusher.0 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-16T17:58:15,143 INFO [MemStoreFlusher.0 {}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e2024121636ef38f3952340c3a69828f6f47b27b5_5e6b04d28ec6af2428036942def2f402 to hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e2024121636ef38f3952340c3a69828f6f47b27b5_5e6b04d28ec6af2428036942def2f402 2024-12-16T17:58:15,144 DEBUG [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/5e6b04d28ec6af2428036942def2f402/.tmp/A/cf254ef9fb2347d888cf32d526089f5f, store: [table=TestAcidGuarantees family=A region=5e6b04d28ec6af2428036942def2f402] 2024-12-16T17:58:15,144 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/5e6b04d28ec6af2428036942def2f402/.tmp/A/cf254ef9fb2347d888cf32d526089f5f is 175, key is test_row_0/A:col10/1734371894730/Put/seqid=0 2024-12-16T17:58:15,147 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41817 is added to blk_1073742252_1428 (size=39749) 2024-12-16T17:58:15,180 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 3609ad07831c,39733,1734371789085 2024-12-16T17:58:15,180 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=39733 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=107 2024-12-16T17:58:15,180 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-2 {event_type=RS_FLUSH_REGIONS, pid=107}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1734371876313.5e6b04d28ec6af2428036942def2f402. 2024-12-16T17:58:15,180 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-2 {event_type=RS_FLUSH_REGIONS, pid=107}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1734371876313.5e6b04d28ec6af2428036942def2f402. 
as already flushing 2024-12-16T17:58:15,180 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-2 {event_type=RS_FLUSH_REGIONS, pid=107}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1734371876313.5e6b04d28ec6af2428036942def2f402. 2024-12-16T17:58:15,180 ERROR [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-2 {event_type=RS_FLUSH_REGIONS, pid=107}] handler.RSProcedureHandler(58): pid=107 java.io.IOException: Unable to complete flush {ENCODED => 5e6b04d28ec6af2428036942def2f402, NAME => 'TestAcidGuarantees,,1734371876313.5e6b04d28ec6af2428036942def2f402.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-16T17:58:15,180 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-2 {event_type=RS_FLUSH_REGIONS, pid=107}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=107 java.io.IOException: Unable to complete flush {ENCODED => 5e6b04d28ec6af2428036942def2f402, NAME => 'TestAcidGuarantees,,1734371876313.5e6b04d28ec6af2428036942def2f402.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-16T17:58:15,181 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38367 {}] master.HMaster(4114): Remote procedure failed, pid=107 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 5e6b04d28ec6af2428036942def2f402, NAME => 'TestAcidGuarantees,,1734371876313.5e6b04d28ec6af2428036942def2f402.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] 
at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 5e6b04d28ec6af2428036942def2f402, NAME => 'TestAcidGuarantees,,1734371876313.5e6b04d28ec6af2428036942def2f402.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-16T17:58:15,325 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38367 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=106 2024-12-16T17:58:15,332 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 3609ad07831c,39733,1734371789085 2024-12-16T17:58:15,332 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=39733 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=107 2024-12-16T17:58:15,332 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-0 {event_type=RS_FLUSH_REGIONS, pid=107}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1734371876313.5e6b04d28ec6af2428036942def2f402. 2024-12-16T17:58:15,333 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-0 {event_type=RS_FLUSH_REGIONS, pid=107}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1734371876313.5e6b04d28ec6af2428036942def2f402. as already flushing 2024-12-16T17:58:15,333 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-0 {event_type=RS_FLUSH_REGIONS, pid=107}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1734371876313.5e6b04d28ec6af2428036942def2f402. 2024-12-16T17:58:15,333 ERROR [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-0 {event_type=RS_FLUSH_REGIONS, pid=107}] handler.RSProcedureHandler(58): pid=107 java.io.IOException: Unable to complete flush {ENCODED => 5e6b04d28ec6af2428036942def2f402, NAME => 'TestAcidGuarantees,,1734371876313.5e6b04d28ec6af2428036942def2f402.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-16T17:58:15,333 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-0 {event_type=RS_FLUSH_REGIONS, pid=107}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=107 java.io.IOException: Unable to complete flush {ENCODED => 5e6b04d28ec6af2428036942def2f402, NAME => 'TestAcidGuarantees,,1734371876313.5e6b04d28ec6af2428036942def2f402.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-16T17:58:15,333 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38367 {}] master.HMaster(4114): Remote procedure failed, pid=107 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 5e6b04d28ec6af2428036942def2f402, NAME => 'TestAcidGuarantees,,1734371876313.5e6b04d28ec6af2428036942def2f402.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 5e6b04d28ec6af2428036942def2f402, NAME => 'TestAcidGuarantees,,1734371876313.5e6b04d28ec6af2428036942def2f402.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-16T17:58:15,342 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39733 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5e6b04d28ec6af2428036942def2f402, server=3609ad07831c,39733,1734371789085 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-16T17:58:15,342 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39733 {}] ipc.CallRunner(138): callId: 172 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:47436 deadline: 1734371955340, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5e6b04d28ec6af2428036942def2f402, server=3609ad07831c,39733,1734371789085 2024-12-16T17:58:15,380 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39733 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5e6b04d28ec6af2428036942def2f402, server=3609ad07831c,39733,1734371789085 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-16T17:58:15,380 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39733 {}] ipc.CallRunner(138): callId: 106 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:47356 deadline: 1734371955377, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5e6b04d28ec6af2428036942def2f402, server=3609ad07831c,39733,1734371789085 2024-12-16T17:58:15,386 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39733 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5e6b04d28ec6af2428036942def2f402, server=3609ad07831c,39733,1734371789085 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-16T17:58:15,386 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39733 {}] ipc.CallRunner(138): callId: 94 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:47422 deadline: 1734371955382, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5e6b04d28ec6af2428036942def2f402, server=3609ad07831c,39733,1734371789085 2024-12-16T17:58:15,387 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39733 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5e6b04d28ec6af2428036942def2f402, server=3609ad07831c,39733,1734371789085 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-16T17:58:15,387 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39733 {}] ipc.CallRunner(138): callId: 92 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:47372 deadline: 1734371955384, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5e6b04d28ec6af2428036942def2f402, server=3609ad07831c,39733,1734371789085 2024-12-16T17:58:15,484 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 3609ad07831c,39733,1734371789085 2024-12-16T17:58:15,484 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=39733 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=107 2024-12-16T17:58:15,485 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-1 {event_type=RS_FLUSH_REGIONS, pid=107}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1734371876313.5e6b04d28ec6af2428036942def2f402. 2024-12-16T17:58:15,485 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-1 {event_type=RS_FLUSH_REGIONS, pid=107}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1734371876313.5e6b04d28ec6af2428036942def2f402. as already flushing 2024-12-16T17:58:15,485 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-1 {event_type=RS_FLUSH_REGIONS, pid=107}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1734371876313.5e6b04d28ec6af2428036942def2f402. 2024-12-16T17:58:15,485 ERROR [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-1 {event_type=RS_FLUSH_REGIONS, pid=107}] handler.RSProcedureHandler(58): pid=107 java.io.IOException: Unable to complete flush {ENCODED => 5e6b04d28ec6af2428036942def2f402, NAME => 'TestAcidGuarantees,,1734371876313.5e6b04d28ec6af2428036942def2f402.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
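The repeated RegionTooBusyException entries above are raised by HRegion.checkResources(): once the region's memstore grows past its blocking limit (here 512.0 K), new Mutate calls are rejected until the in-flight flush drains it. The blocking limit is hbase.hregion.memstore.flush.size multiplied by hbase.hregion.memstore.block.multiplier, so this test cluster is evidently running with a far smaller flush size than the production default. A minimal sketch of a configuration that would produce the 512 K threshold seen in this log (the concrete values are an assumption; the test's actual settings are not shown here):

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;

    public class SmallMemstoreLimit {
      public static void main(String[] args) {
        Configuration conf = HBaseConfiguration.create();
        // Hypothetical values: flush at 128 K and block writes at 4x the flush size,
        // which reproduces the "Over memstore limit=512.0 K" threshold logged above.
        conf.setLong("hbase.hregion.memstore.flush.size", 128 * 1024);
        conf.setInt("hbase.hregion.memstore.block.multiplier", 4);
        long blockingLimit = conf.getLong("hbase.hregion.memstore.flush.size", 0)
            * conf.getInt("hbase.hregion.memstore.block.multiplier", 1);
        System.out.println("memstore blocking limit = " + blockingLimit + " bytes"); // 524288 bytes = 512.0 K
      }
    }

Writers that hit this condition normally need no special handling: RegionTooBusyException is a retryable IOException, so the standard HBase client backs off and retries the Mutate within its hbase.client.retries.number / hbase.client.pause budget.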
2024-12-16T17:58:15,485 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-1 {event_type=RS_FLUSH_REGIONS, pid=107}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=107 java.io.IOException: Unable to complete flush {ENCODED => 5e6b04d28ec6af2428036942def2f402, NAME => 'TestAcidGuarantees,,1734371876313.5e6b04d28ec6af2428036942def2f402.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-16T17:58:15,485 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38367 {}] master.HMaster(4114): Remote procedure failed, pid=107 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 5e6b04d28ec6af2428036942def2f402, NAME => 'TestAcidGuarantees,,1734371876313.5e6b04d28ec6af2428036942def2f402.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 5e6b04d28ec6af2428036942def2f402, NAME => 'TestAcidGuarantees,,1734371876313.5e6b04d28ec6af2428036942def2f402.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-16T17:58:15,548 INFO [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=252, memsize=33.5 K, hasBloomFilter=true, into tmp file hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/5e6b04d28ec6af2428036942def2f402/.tmp/A/cf254ef9fb2347d888cf32d526089f5f 2024-12-16T17:58:15,555 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/5e6b04d28ec6af2428036942def2f402/.tmp/B/4f06ac493baa4b478591d7648b0349c5 is 50, key is test_row_0/B:col10/1734371894730/Put/seqid=0 2024-12-16T17:58:15,558 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41817 is added to blk_1073742253_1429 (size=12151) 2024-12-16T17:58:15,636 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 3609ad07831c,39733,1734371789085 2024-12-16T17:58:15,637 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=39733 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=107 2024-12-16T17:58:15,637 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-2 {event_type=RS_FLUSH_REGIONS, pid=107}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1734371876313.5e6b04d28ec6af2428036942def2f402. 2024-12-16T17:58:15,637 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-2 {event_type=RS_FLUSH_REGIONS, pid=107}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1734371876313.5e6b04d28ec6af2428036942def2f402. as already flushing 2024-12-16T17:58:15,637 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-2 {event_type=RS_FLUSH_REGIONS, pid=107}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1734371876313.5e6b04d28ec6af2428036942def2f402. 2024-12-16T17:58:15,637 ERROR [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-2 {event_type=RS_FLUSH_REGIONS, pid=107}] handler.RSProcedureHandler(58): pid=107 java.io.IOException: Unable to complete flush {ENCODED => 5e6b04d28ec6af2428036942def2f402, NAME => 'TestAcidGuarantees,,1734371876313.5e6b04d28ec6af2428036942def2f402.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
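The pid=107 cycle that keeps repeating above is the master re-dispatching a region flush: each FlushRegionCallable finds the region already flushing ("NOT flushing ... as already flushing"), fails with "Unable to complete flush", the master logs "Remote procedure failed, pid=107", and another attempt is scheduled until the MemStoreFlusher run in progress completes. In client code such a table flush is normally requested with Admin.flush(); a hedged sketch against the table named in this log (whether the test issues exactly this call is an assumption):

    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;

    public class FlushTestTable {
      public static void main(String[] args) throws Exception {
        try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create());
             Admin admin = conn.getAdmin()) {
          // Ask for a flush of every region of the table. In this build the request is
          // driven as a master procedure (the pid=106/107 entries above), which keeps
          // retrying regions that are still busy with an earlier flush.
          admin.flush(TableName.valueOf("TestAcidGuarantees"));
        }
      }
    }

The repeated ERROR / "Remote procedure failed" entries therefore indicate contention with the already-running flush rather than a lost flush; the procedure can complete once MemStoreFlusher.0 finishes committing its store files.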
2024-12-16T17:58:15,637 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-2 {event_type=RS_FLUSH_REGIONS, pid=107}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=107 java.io.IOException: Unable to complete flush {ENCODED => 5e6b04d28ec6af2428036942def2f402, NAME => 'TestAcidGuarantees,,1734371876313.5e6b04d28ec6af2428036942def2f402.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-16T17:58:15,639 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38367 {}] master.HMaster(4114): Remote procedure failed, pid=107 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 5e6b04d28ec6af2428036942def2f402, NAME => 'TestAcidGuarantees,,1734371876313.5e6b04d28ec6af2428036942def2f402.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 5e6b04d28ec6af2428036942def2f402, NAME => 'TestAcidGuarantees,,1734371876313.5e6b04d28ec6af2428036942def2f402.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-16T17:58:15,791 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 3609ad07831c,39733,1734371789085 2024-12-16T17:58:15,791 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=39733 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=107 2024-12-16T17:58:15,791 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-0 {event_type=RS_FLUSH_REGIONS, pid=107}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1734371876313.5e6b04d28ec6af2428036942def2f402. 2024-12-16T17:58:15,791 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-0 {event_type=RS_FLUSH_REGIONS, pid=107}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1734371876313.5e6b04d28ec6af2428036942def2f402. as already flushing 2024-12-16T17:58:15,791 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-0 {event_type=RS_FLUSH_REGIONS, pid=107}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1734371876313.5e6b04d28ec6af2428036942def2f402. 2024-12-16T17:58:15,791 ERROR [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-0 {event_type=RS_FLUSH_REGIONS, pid=107}] handler.RSProcedureHandler(58): pid=107 java.io.IOException: Unable to complete flush {ENCODED => 5e6b04d28ec6af2428036942def2f402, NAME => 'TestAcidGuarantees,,1734371876313.5e6b04d28ec6af2428036942def2f402.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-16T17:58:15,791 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-0 {event_type=RS_FLUSH_REGIONS, pid=107}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=107 java.io.IOException: Unable to complete flush {ENCODED => 5e6b04d28ec6af2428036942def2f402, NAME => 'TestAcidGuarantees,,1734371876313.5e6b04d28ec6af2428036942def2f402.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-16T17:58:15,792 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38367 {}] master.HMaster(4114): Remote procedure failed, pid=107 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 5e6b04d28ec6af2428036942def2f402, NAME => 'TestAcidGuarantees,,1734371876313.5e6b04d28ec6af2428036942def2f402.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 5e6b04d28ec6af2428036942def2f402, NAME => 'TestAcidGuarantees,,1734371876313.5e6b04d28ec6af2428036942def2f402.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-16T17:58:15,825 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38367 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=106 2024-12-16T17:58:15,887 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39733 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5e6b04d28ec6af2428036942def2f402, server=3609ad07831c,39733,1734371789085 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-16T17:58:15,887 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39733 {}] ipc.CallRunner(138): callId: 108 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:47356 deadline: 1734371955885, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5e6b04d28ec6af2428036942def2f402, server=3609ad07831c,39733,1734371789085 2024-12-16T17:58:15,892 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39733 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5e6b04d28ec6af2428036942def2f402, server=3609ad07831c,39733,1734371789085 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-16T17:58:15,892 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39733 {}] ipc.CallRunner(138): callId: 96 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:47422 deadline: 1734371955890, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5e6b04d28ec6af2428036942def2f402, server=3609ad07831c,39733,1734371789085 2024-12-16T17:58:15,892 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39733 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5e6b04d28ec6af2428036942def2f402, server=3609ad07831c,39733,1734371789085 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-16T17:58:15,893 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39733 {}] ipc.CallRunner(138): callId: 94 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:47372 deadline: 1734371955890, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5e6b04d28ec6af2428036942def2f402, server=3609ad07831c,39733,1734371789085 2024-12-16T17:58:15,943 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 3609ad07831c,39733,1734371789085 2024-12-16T17:58:15,943 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=39733 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=107 2024-12-16T17:58:15,943 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-1 {event_type=RS_FLUSH_REGIONS, pid=107}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1734371876313.5e6b04d28ec6af2428036942def2f402. 2024-12-16T17:58:15,944 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-1 {event_type=RS_FLUSH_REGIONS, pid=107}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1734371876313.5e6b04d28ec6af2428036942def2f402. as already flushing 2024-12-16T17:58:15,944 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-1 {event_type=RS_FLUSH_REGIONS, pid=107}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1734371876313.5e6b04d28ec6af2428036942def2f402. 2024-12-16T17:58:15,944 ERROR [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-1 {event_type=RS_FLUSH_REGIONS, pid=107}] handler.RSProcedureHandler(58): pid=107 java.io.IOException: Unable to complete flush {ENCODED => 5e6b04d28ec6af2428036942def2f402, NAME => 'TestAcidGuarantees,,1734371876313.5e6b04d28ec6af2428036942def2f402.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-16T17:58:15,944 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-1 {event_type=RS_FLUSH_REGIONS, pid=107}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=107 java.io.IOException: Unable to complete flush {ENCODED => 5e6b04d28ec6af2428036942def2f402, NAME => 'TestAcidGuarantees,,1734371876313.5e6b04d28ec6af2428036942def2f402.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-16T17:58:15,944 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38367 {}] master.HMaster(4114): Remote procedure failed, pid=107 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 5e6b04d28ec6af2428036942def2f402, NAME => 'TestAcidGuarantees,,1734371876313.5e6b04d28ec6af2428036942def2f402.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 5e6b04d28ec6af2428036942def2f402, NAME => 'TestAcidGuarantees,,1734371876313.5e6b04d28ec6af2428036942def2f402.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-16T17:58:15,959 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=33.54 KB at sequenceid=252 (bloomFilter=true), to=hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/5e6b04d28ec6af2428036942def2f402/.tmp/B/4f06ac493baa4b478591d7648b0349c5 2024-12-16T17:58:15,964 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/5e6b04d28ec6af2428036942def2f402/.tmp/C/990249a6898e40edbafdbe15e541a4b5 is 50, key is test_row_0/C:col10/1734371894730/Put/seqid=0 2024-12-16T17:58:15,967 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41817 is added to blk_1073742254_1430 (size=12151) 2024-12-16T17:58:16,095 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 3609ad07831c,39733,1734371789085 2024-12-16T17:58:16,096 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=39733 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=107 2024-12-16T17:58:16,096 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-2 {event_type=RS_FLUSH_REGIONS, pid=107}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1734371876313.5e6b04d28ec6af2428036942def2f402. 2024-12-16T17:58:16,096 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-2 {event_type=RS_FLUSH_REGIONS, pid=107}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1734371876313.5e6b04d28ec6af2428036942def2f402. 
as already flushing 2024-12-16T17:58:16,096 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-2 {event_type=RS_FLUSH_REGIONS, pid=107}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1734371876313.5e6b04d28ec6af2428036942def2f402. 2024-12-16T17:58:16,096 ERROR [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-2 {event_type=RS_FLUSH_REGIONS, pid=107}] handler.RSProcedureHandler(58): pid=107 java.io.IOException: Unable to complete flush {ENCODED => 5e6b04d28ec6af2428036942def2f402, NAME => 'TestAcidGuarantees,,1734371876313.5e6b04d28ec6af2428036942def2f402.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-16T17:58:16,096 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-2 {event_type=RS_FLUSH_REGIONS, pid=107}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=107 java.io.IOException: Unable to complete flush {ENCODED => 5e6b04d28ec6af2428036942def2f402, NAME => 'TestAcidGuarantees,,1734371876313.5e6b04d28ec6af2428036942def2f402.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-16T17:58:16,097 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38367 {}] master.HMaster(4114): Remote procedure failed, pid=107 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 5e6b04d28ec6af2428036942def2f402, NAME => 'TestAcidGuarantees,,1734371876313.5e6b04d28ec6af2428036942def2f402.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] 
at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 5e6b04d28ec6af2428036942def2f402, NAME => 'TestAcidGuarantees,,1734371876313.5e6b04d28ec6af2428036942def2f402.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-16T17:58:16,248 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 3609ad07831c,39733,1734371789085 2024-12-16T17:58:16,248 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=39733 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=107 2024-12-16T17:58:16,248 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-0 {event_type=RS_FLUSH_REGIONS, pid=107}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1734371876313.5e6b04d28ec6af2428036942def2f402. 2024-12-16T17:58:16,249 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-0 {event_type=RS_FLUSH_REGIONS, pid=107}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1734371876313.5e6b04d28ec6af2428036942def2f402. as already flushing 2024-12-16T17:58:16,249 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-0 {event_type=RS_FLUSH_REGIONS, pid=107}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1734371876313.5e6b04d28ec6af2428036942def2f402. 2024-12-16T17:58:16,249 ERROR [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-0 {event_type=RS_FLUSH_REGIONS, pid=107}] handler.RSProcedureHandler(58): pid=107 java.io.IOException: Unable to complete flush {ENCODED => 5e6b04d28ec6af2428036942def2f402, NAME => 'TestAcidGuarantees,,1734371876313.5e6b04d28ec6af2428036942def2f402.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-16T17:58:16,249 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-0 {event_type=RS_FLUSH_REGIONS, pid=107}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=107 java.io.IOException: Unable to complete flush {ENCODED => 5e6b04d28ec6af2428036942def2f402, NAME => 'TestAcidGuarantees,,1734371876313.5e6b04d28ec6af2428036942def2f402.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-16T17:58:16,249 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38367 {}] master.HMaster(4114): Remote procedure failed, pid=107 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 5e6b04d28ec6af2428036942def2f402, NAME => 'TestAcidGuarantees,,1734371876313.5e6b04d28ec6af2428036942def2f402.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 5e6b04d28ec6af2428036942def2f402, NAME => 'TestAcidGuarantees,,1734371876313.5e6b04d28ec6af2428036942def2f402.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-16T17:58:16,368 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=33.54 KB at sequenceid=252 (bloomFilter=true), to=hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/5e6b04d28ec6af2428036942def2f402/.tmp/C/990249a6898e40edbafdbe15e541a4b5 2024-12-16T17:58:16,371 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/5e6b04d28ec6af2428036942def2f402/.tmp/A/cf254ef9fb2347d888cf32d526089f5f as hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/5e6b04d28ec6af2428036942def2f402/A/cf254ef9fb2347d888cf32d526089f5f 2024-12-16T17:58:16,374 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/5e6b04d28ec6af2428036942def2f402/A/cf254ef9fb2347d888cf32d526089f5f, entries=200, sequenceid=252, filesize=38.8 K 2024-12-16T17:58:16,375 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/5e6b04d28ec6af2428036942def2f402/.tmp/B/4f06ac493baa4b478591d7648b0349c5 as hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/5e6b04d28ec6af2428036942def2f402/B/4f06ac493baa4b478591d7648b0349c5 2024-12-16T17:58:16,378 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/5e6b04d28ec6af2428036942def2f402/B/4f06ac493baa4b478591d7648b0349c5, entries=150, 
sequenceid=252, filesize=11.9 K 2024-12-16T17:58:16,379 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/5e6b04d28ec6af2428036942def2f402/.tmp/C/990249a6898e40edbafdbe15e541a4b5 as hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/5e6b04d28ec6af2428036942def2f402/C/990249a6898e40edbafdbe15e541a4b5 2024-12-16T17:58:16,382 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/5e6b04d28ec6af2428036942def2f402/C/990249a6898e40edbafdbe15e541a4b5, entries=150, sequenceid=252, filesize=11.9 K 2024-12-16T17:58:16,383 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~100.63 KB/103050, heapSize ~264.38 KB/270720, currentSize=100.63 KB/103050 for 5e6b04d28ec6af2428036942def2f402 in 1651ms, sequenceid=252, compaction requested=false 2024-12-16T17:58:16,383 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 5e6b04d28ec6af2428036942def2f402: 2024-12-16T17:58:16,400 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 3609ad07831c,39733,1734371789085 2024-12-16T17:58:16,401 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=39733 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=107 2024-12-16T17:58:16,401 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-1 {event_type=RS_FLUSH_REGIONS, pid=107}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1734371876313.5e6b04d28ec6af2428036942def2f402. 
2024-12-16T17:58:16,401 INFO [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-1 {event_type=RS_FLUSH_REGIONS, pid=107}] regionserver.HRegion(2837): Flushing 5e6b04d28ec6af2428036942def2f402 3/3 column families, dataSize=100.63 KB heapSize=264.42 KB 2024-12-16T17:58:16,401 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-1 {event_type=RS_FLUSH_REGIONS, pid=107}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 5e6b04d28ec6af2428036942def2f402, store=A 2024-12-16T17:58:16,401 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-1 {event_type=RS_FLUSH_REGIONS, pid=107}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-16T17:58:16,401 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-1 {event_type=RS_FLUSH_REGIONS, pid=107}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 5e6b04d28ec6af2428036942def2f402, store=B 2024-12-16T17:58:16,402 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-1 {event_type=RS_FLUSH_REGIONS, pid=107}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-16T17:58:16,402 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-1 {event_type=RS_FLUSH_REGIONS, pid=107}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 5e6b04d28ec6af2428036942def2f402, store=C 2024-12-16T17:58:16,402 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-1 {event_type=RS_FLUSH_REGIONS, pid=107}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-16T17:58:16,407 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-1 {event_type=RS_FLUSH_REGIONS, pid=107}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e2024121634360ab573d049ec990520c57cb1583d_5e6b04d28ec6af2428036942def2f402 is 50, key is test_row_0/A:col10/1734371894765/Put/seqid=0 2024-12-16T17:58:16,411 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41817 is added to blk_1073742255_1431 (size=12454) 2024-12-16T17:58:16,811 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-1 {event_type=RS_FLUSH_REGIONS, pid=107}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-16T17:58:16,814 INFO [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-1 {event_type=RS_FLUSH_REGIONS, pid=107}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e2024121634360ab573d049ec990520c57cb1583d_5e6b04d28ec6af2428036942def2f402 to hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e2024121634360ab573d049ec990520c57cb1583d_5e6b04d28ec6af2428036942def2f402 2024-12-16T17:58:16,815 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-1 {event_type=RS_FLUSH_REGIONS, pid=107}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/5e6b04d28ec6af2428036942def2f402/.tmp/A/ac0c2efa641f4e418dfcf769c4c7c4a2, store: [table=TestAcidGuarantees family=A region=5e6b04d28ec6af2428036942def2f402] 2024-12-16T17:58:16,815 
DEBUG [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-1 {event_type=RS_FLUSH_REGIONS, pid=107}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/5e6b04d28ec6af2428036942def2f402/.tmp/A/ac0c2efa641f4e418dfcf769c4c7c4a2 is 175, key is test_row_0/A:col10/1734371894765/Put/seqid=0 2024-12-16T17:58:16,818 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41817 is added to blk_1073742256_1432 (size=31255) 2024-12-16T17:58:16,826 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38367 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=106 2024-12-16T17:58:16,900 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1734371876313.5e6b04d28ec6af2428036942def2f402. as already flushing 2024-12-16T17:58:16,900 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39733 {}] regionserver.HRegion(8581): Flush requested on 5e6b04d28ec6af2428036942def2f402 2024-12-16T17:58:16,928 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39733 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5e6b04d28ec6af2428036942def2f402, server=3609ad07831c,39733,1734371789085 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-16T17:58:16,928 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39733 {}] ipc.CallRunner(138): callId: 102 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:47422 deadline: 1734371956922, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5e6b04d28ec6af2428036942def2f402, server=3609ad07831c,39733,1734371789085 2024-12-16T17:58:16,928 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39733 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5e6b04d28ec6af2428036942def2f402, server=3609ad07831c,39733,1734371789085 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-16T17:58:16,929 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39733 {}] ipc.CallRunner(138): callId: 101 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:47372 deadline: 1734371956923, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5e6b04d28ec6af2428036942def2f402, server=3609ad07831c,39733,1734371789085 2024-12-16T17:58:16,929 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39733 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5e6b04d28ec6af2428036942def2f402, server=3609ad07831c,39733,1734371789085 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-16T17:58:16,929 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39733 {}] ipc.CallRunner(138): callId: 116 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:47356 deadline: 1734371956924, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5e6b04d28ec6af2428036942def2f402, server=3609ad07831c,39733,1734371789085 2024-12-16T17:58:17,030 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39733 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5e6b04d28ec6af2428036942def2f402, server=3609ad07831c,39733,1734371789085 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-16T17:58:17,030 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39733 {}] ipc.CallRunner(138): callId: 104 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:47422 deadline: 1734371957029, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5e6b04d28ec6af2428036942def2f402, server=3609ad07831c,39733,1734371789085 2024-12-16T17:58:17,033 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39733 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5e6b04d28ec6af2428036942def2f402, server=3609ad07831c,39733,1734371789085 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-16T17:58:17,033 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39733 {}] ipc.CallRunner(138): callId: 103 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:47372 deadline: 1734371957029, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5e6b04d28ec6af2428036942def2f402, server=3609ad07831c,39733,1734371789085 2024-12-16T17:58:17,033 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39733 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5e6b04d28ec6af2428036942def2f402, server=3609ad07831c,39733,1734371789085 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-16T17:58:17,034 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39733 {}] ipc.CallRunner(138): callId: 118 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:47356 deadline: 1734371957030, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5e6b04d28ec6af2428036942def2f402, server=3609ad07831c,39733,1734371789085 2024-12-16T17:58:17,219 INFO [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-1 {event_type=RS_FLUSH_REGIONS, pid=107}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=273, memsize=33.5 K, hasBloomFilter=true, into tmp file hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/5e6b04d28ec6af2428036942def2f402/.tmp/A/ac0c2efa641f4e418dfcf769c4c7c4a2 2024-12-16T17:58:17,224 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-1 {event_type=RS_FLUSH_REGIONS, pid=107}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/5e6b04d28ec6af2428036942def2f402/.tmp/B/ea153dd6f81c442bbdd80ea6388dd6a9 is 50, key is test_row_0/B:col10/1734371894765/Put/seqid=0 2024-12-16T17:58:17,229 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41817 is added to blk_1073742257_1433 (size=12301) 2024-12-16T17:58:17,234 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39733 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5e6b04d28ec6af2428036942def2f402, server=3609ad07831c,39733,1734371789085 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-16T17:58:17,235 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39733 {}] ipc.CallRunner(138): callId: 106 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:47422 deadline: 1734371957231, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5e6b04d28ec6af2428036942def2f402, server=3609ad07831c,39733,1734371789085 2024-12-16T17:58:17,236 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39733 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5e6b04d28ec6af2428036942def2f402, server=3609ad07831c,39733,1734371789085 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-16T17:58:17,237 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39733 {}] ipc.CallRunner(138): callId: 120 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:47356 deadline: 1734371957234, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5e6b04d28ec6af2428036942def2f402, server=3609ad07831c,39733,1734371789085 2024-12-16T17:58:17,237 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39733 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5e6b04d28ec6af2428036942def2f402, server=3609ad07831c,39733,1734371789085 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-16T17:58:17,237 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39733 {}] ipc.CallRunner(138): callId: 105 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:47372 deadline: 1734371957235, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5e6b04d28ec6af2428036942def2f402, server=3609ad07831c,39733,1734371789085 2024-12-16T17:58:17,351 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39733 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5e6b04d28ec6af2428036942def2f402, server=3609ad07831c,39733,1734371789085 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-16T17:58:17,351 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39733 {}] ipc.CallRunner(138): callId: 174 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:47436 deadline: 1734371957349, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5e6b04d28ec6af2428036942def2f402, server=3609ad07831c,39733,1734371789085 2024-12-16T17:58:17,351 DEBUG [Thread-1617 {}] client.RpcRetryingCallerImpl(129): Call exception, tries=6, retries=16, started=4139 ms ago, cancelled=false, msg=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5e6b04d28ec6af2428036942def2f402, server=3609ad07831c,39733,1734371789085 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) , details=row 'test_row_0' on table 'TestAcidGuarantees' at region=TestAcidGuarantees,,1734371876313.5e6b04d28ec6af2428036942def2f402., hostname=3609ad07831c,39733,1734371789085, seqNum=5, see https://s.apache.org/timeout, exception=org.apache.hadoop.hbase.RegionTooBusyException: org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5e6b04d28ec6af2428036942def2f402, server=3609ad07831c,39733,1734371789085 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at jdk.internal.reflect.GeneratedConstructorAccessor40.newInstance(Unknown Source) at 
java.base/jdk.internal.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45) at java.base/java.lang.reflect.Constructor.newInstanceWithCaller(Constructor.java:499) at java.base/java.lang.reflect.Constructor.newInstance(Constructor.java:480) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.instantiateException(RemoteWithExtrasException.java:110) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.unwrapRemoteException(RemoteWithExtrasException.java:100) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.makeIOExceptionOfException(ProtobufUtil.java:280) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.handleRemoteException(ProtobufUtil.java:265) at org.apache.hadoop.hbase.client.RegionServerCallable.call(RegionServerCallable.java:133) at org.apache.hadoop.hbase.client.RpcRetryingCallerImpl.callWithRetries(RpcRetryingCallerImpl.java:104) at org.apache.hadoop.hbase.client.HTable.lambda$put$3(HTable.java:578) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.client.HTable.put(HTable.java:565) at org.apache.hadoop.hbase.AcidGuaranteesTestTool$AtomicityWriter.doAnAction(AcidGuaranteesTestTool.java:169) at org.apache.hadoop.hbase.MultithreadedTestUtil$RepeatingTestThread.doWork(MultithreadedTestUtil.java:149) at org.apache.hadoop.hbase.MultithreadedTestUtil$TestThread.run(MultithreadedTestUtil.java:123) Caused by: org.apache.hadoop.hbase.ipc.RemoteWithExtrasException(org.apache.hadoop.hbase.RegionTooBusyException): org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5e6b04d28ec6af2428036942def2f402, server=3609ad07831c,39733,1734371789085 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.onCallFinished(AbstractRpcClient.java:392) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.access$100(AbstractRpcClient.java:94) at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:430) at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:425) at org.apache.hadoop.hbase.ipc.Call.callComplete(Call.java:116) at org.apache.hadoop.hbase.ipc.Call.setException(Call.java:131) at org.apache.hadoop.hbase.ipc.RpcConnection.readResponse(RpcConnection.java:457) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.readResponse(NettyRpcDuplexHandler.java:125) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.channelRead(NettyRpcDuplexHandler.java:140) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at 
org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.fireChannelRead(ByteToMessageDecoder.java:346) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.channelRead(ByteToMessageDecoder.java:318) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:444) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.timeout.IdleStateHandler.channelRead(IdleStateHandler.java:289) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline$HeadContext.channelRead(DefaultChannelPipeline.java:1357) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:440) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline.fireChannelRead(DefaultChannelPipeline.java:868) at org.apache.hbase.thirdparty.io.netty.channel.nio.AbstractNioByteChannel$NioByteUnsafe.read(AbstractNioByteChannel.java:166) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKey(NioEventLoop.java:788) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeysOptimized(NioEventLoop.java:724) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeys(NioEventLoop.java:650) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:562) at org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) at org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) at org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) at java.base/java.lang.Thread.run(Thread.java:840) 2024-12-16T17:58:17,537 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39733 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5e6b04d28ec6af2428036942def2f402, server=3609ad07831c,39733,1734371789085 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-16T17:58:17,537 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39733 {}] ipc.CallRunner(138): callId: 108 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:47422 deadline: 1734371957536, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5e6b04d28ec6af2428036942def2f402, server=3609ad07831c,39733,1734371789085 2024-12-16T17:58:17,542 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39733 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5e6b04d28ec6af2428036942def2f402, server=3609ad07831c,39733,1734371789085 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-16T17:58:17,542 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39733 {}] ipc.CallRunner(138): callId: 107 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:47372 deadline: 1734371957539, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5e6b04d28ec6af2428036942def2f402, server=3609ad07831c,39733,1734371789085 2024-12-16T17:58:17,542 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39733 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5e6b04d28ec6af2428036942def2f402, server=3609ad07831c,39733,1734371789085 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-16T17:58:17,542 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39733 {}] ipc.CallRunner(138): callId: 122 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:47356 deadline: 1734371957539, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5e6b04d28ec6af2428036942def2f402, server=3609ad07831c,39733,1734371789085 2024-12-16T17:58:17,630 INFO [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-1 {event_type=RS_FLUSH_REGIONS, pid=107}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=33.54 KB at sequenceid=273 (bloomFilter=true), to=hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/5e6b04d28ec6af2428036942def2f402/.tmp/B/ea153dd6f81c442bbdd80ea6388dd6a9 2024-12-16T17:58:17,636 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-1 {event_type=RS_FLUSH_REGIONS, pid=107}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/5e6b04d28ec6af2428036942def2f402/.tmp/C/dce32d1d80984287ac14792b34596bb3 is 50, key is test_row_0/C:col10/1734371894765/Put/seqid=0 2024-12-16T17:58:17,640 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41817 is added to blk_1073742258_1434 (size=12301) 2024-12-16T17:58:18,040 INFO [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-1 {event_type=RS_FLUSH_REGIONS, pid=107}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=33.54 KB at sequenceid=273 (bloomFilter=true), to=hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/5e6b04d28ec6af2428036942def2f402/.tmp/C/dce32d1d80984287ac14792b34596bb3 2024-12-16T17:58:18,045 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-1 {event_type=RS_FLUSH_REGIONS, pid=107}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/5e6b04d28ec6af2428036942def2f402/.tmp/A/ac0c2efa641f4e418dfcf769c4c7c4a2 as 
hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/5e6b04d28ec6af2428036942def2f402/A/ac0c2efa641f4e418dfcf769c4c7c4a2 2024-12-16T17:58:18,045 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39733 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5e6b04d28ec6af2428036942def2f402, server=3609ad07831c,39733,1734371789085 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-16T17:58:18,045 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39733 {}] ipc.CallRunner(138): callId: 110 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:47422 deadline: 1734371958041, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5e6b04d28ec6af2428036942def2f402, server=3609ad07831c,39733,1734371789085 2024-12-16T17:58:18,049 INFO [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-1 {event_type=RS_FLUSH_REGIONS, pid=107}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/5e6b04d28ec6af2428036942def2f402/A/ac0c2efa641f4e418dfcf769c4c7c4a2, entries=150, sequenceid=273, filesize=30.5 K 2024-12-16T17:58:18,050 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-1 {event_type=RS_FLUSH_REGIONS, pid=107}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/5e6b04d28ec6af2428036942def2f402/.tmp/B/ea153dd6f81c442bbdd80ea6388dd6a9 as hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/5e6b04d28ec6af2428036942def2f402/B/ea153dd6f81c442bbdd80ea6388dd6a9 2024-12-16T17:58:18,050 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39733 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5e6b04d28ec6af2428036942def2f402, server=3609ad07831c,39733,1734371789085 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-16T17:58:18,050 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39733 {}] ipc.CallRunner(138): callId: 109 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:47372 deadline: 1734371958046, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5e6b04d28ec6af2428036942def2f402, server=3609ad07831c,39733,1734371789085 2024-12-16T17:58:18,050 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39733 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5e6b04d28ec6af2428036942def2f402, server=3609ad07831c,39733,1734371789085 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-16T17:58:18,050 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39733 {}] ipc.CallRunner(138): callId: 124 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:47356 deadline: 1734371958047, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5e6b04d28ec6af2428036942def2f402, server=3609ad07831c,39733,1734371789085 2024-12-16T17:58:18,053 INFO [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-1 {event_type=RS_FLUSH_REGIONS, pid=107}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/5e6b04d28ec6af2428036942def2f402/B/ea153dd6f81c442bbdd80ea6388dd6a9, entries=150, sequenceid=273, filesize=12.0 K 2024-12-16T17:58:18,054 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-1 {event_type=RS_FLUSH_REGIONS, pid=107}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/5e6b04d28ec6af2428036942def2f402/.tmp/C/dce32d1d80984287ac14792b34596bb3 as hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/5e6b04d28ec6af2428036942def2f402/C/dce32d1d80984287ac14792b34596bb3 2024-12-16T17:58:18,057 INFO [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-1 {event_type=RS_FLUSH_REGIONS, pid=107}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/5e6b04d28ec6af2428036942def2f402/C/dce32d1d80984287ac14792b34596bb3, entries=150, sequenceid=273, filesize=12.0 K 2024-12-16T17:58:18,057 INFO [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-1 {event_type=RS_FLUSH_REGIONS, pid=107}] regionserver.HRegion(3040): Finished flush of dataSize ~100.63 KB/103050, heapSize ~264.38 KB/270720, currentSize=100.63 KB/103050 for 5e6b04d28ec6af2428036942def2f402 in 1656ms, sequenceid=273, compaction requested=true 2024-12-16T17:58:18,058 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-1 {event_type=RS_FLUSH_REGIONS, pid=107}] regionserver.HRegion(2538): Flush status journal for 5e6b04d28ec6af2428036942def2f402: 2024-12-16T17:58:18,058 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-1 {event_type=RS_FLUSH_REGIONS, pid=107}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1734371876313.5e6b04d28ec6af2428036942def2f402. 
2024-12-16T17:58:18,058 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-1 {event_type=RS_FLUSH_REGIONS, pid=107}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=107 2024-12-16T17:58:18,058 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38367 {}] master.HMaster(4106): Remote procedure done, pid=107 2024-12-16T17:58:18,060 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=107, resume processing ppid=106 2024-12-16T17:58:18,060 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=107, ppid=106, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 3.3360 sec 2024-12-16T17:58:18,061 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=106, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=106, table=TestAcidGuarantees in 3.3380 sec 2024-12-16T17:58:18,827 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38367 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=106 2024-12-16T17:58:18,827 INFO [Thread-1619 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 106 completed 2024-12-16T17:58:18,828 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38367 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-12-16T17:58:18,828 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38367 {}] procedure2.ProcedureExecutor(1098): Stored pid=108, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=108, table=TestAcidGuarantees 2024-12-16T17:58:18,829 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38367 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=108 2024-12-16T17:58:18,829 INFO [PEWorker-2 {}] procedure.FlushTableProcedure(91): pid=108, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=108, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-12-16T17:58:18,829 INFO [PEWorker-2 {}] procedure.FlushTableProcedure(91): pid=108, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=108, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-12-16T17:58:18,830 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=109, ppid=108, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-12-16T17:58:18,929 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38367 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=108 2024-12-16T17:58:18,981 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 3609ad07831c,39733,1734371789085 2024-12-16T17:58:18,981 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=39733 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=109 2024-12-16T17:58:18,981 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-2 {event_type=RS_FLUSH_REGIONS, pid=109}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1734371876313.5e6b04d28ec6af2428036942def2f402. 
2024-12-16T17:58:18,981 INFO [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-2 {event_type=RS_FLUSH_REGIONS, pid=109}] regionserver.HRegion(2837): Flushing 5e6b04d28ec6af2428036942def2f402 3/3 column families, dataSize=100.63 KB heapSize=264.42 KB 2024-12-16T17:58:18,982 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-2 {event_type=RS_FLUSH_REGIONS, pid=109}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 5e6b04d28ec6af2428036942def2f402, store=A 2024-12-16T17:58:18,982 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-2 {event_type=RS_FLUSH_REGIONS, pid=109}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-16T17:58:18,982 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-2 {event_type=RS_FLUSH_REGIONS, pid=109}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 5e6b04d28ec6af2428036942def2f402, store=B 2024-12-16T17:58:18,982 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-2 {event_type=RS_FLUSH_REGIONS, pid=109}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-16T17:58:18,982 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-2 {event_type=RS_FLUSH_REGIONS, pid=109}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 5e6b04d28ec6af2428036942def2f402, store=C 2024-12-16T17:58:18,982 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-2 {event_type=RS_FLUSH_REGIONS, pid=109}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-16T17:58:18,988 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-2 {event_type=RS_FLUSH_REGIONS, pid=109}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241216b3eef26c55514441b8715a4c307bcff9_5e6b04d28ec6af2428036942def2f402 is 50, key is test_row_0/A:col10/1734371896922/Put/seqid=0 2024-12-16T17:58:18,991 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41817 is added to blk_1073742259_1435 (size=12454) 2024-12-16T17:58:19,058 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1734371876313.5e6b04d28ec6af2428036942def2f402. as already flushing 2024-12-16T17:58:19,058 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39733 {}] regionserver.HRegion(8581): Flush requested on 5e6b04d28ec6af2428036942def2f402 2024-12-16T17:58:19,090 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39733 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5e6b04d28ec6af2428036942def2f402, server=3609ad07831c,39733,1734371789085 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-16T17:58:19,090 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39733 {}] ipc.CallRunner(138): callId: 117 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:47422 deadline: 1734371959084, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5e6b04d28ec6af2428036942def2f402, server=3609ad07831c,39733,1734371789085 2024-12-16T17:58:19,090 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39733 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5e6b04d28ec6af2428036942def2f402, server=3609ad07831c,39733,1734371789085 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-16T17:58:19,090 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39733 {}] ipc.CallRunner(138): callId: 116 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:47372 deadline: 1734371959084, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5e6b04d28ec6af2428036942def2f402, server=3609ad07831c,39733,1734371789085 2024-12-16T17:58:19,095 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39733 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5e6b04d28ec6af2428036942def2f402, server=3609ad07831c,39733,1734371789085 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-16T17:58:19,095 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39733 {}] ipc.CallRunner(138): callId: 131 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:47356 deadline: 1734371959090, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5e6b04d28ec6af2428036942def2f402, server=3609ad07831c,39733,1734371789085 2024-12-16T17:58:19,130 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38367 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=108 2024-12-16T17:58:19,141 DEBUG [Thread-1620 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x49ac632a to 127.0.0.1:49190 2024-12-16T17:58:19,141 DEBUG [Thread-1620 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-16T17:58:19,146 DEBUG [Thread-1628 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x57e76ace to 127.0.0.1:49190 2024-12-16T17:58:19,146 DEBUG [Thread-1628 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-16T17:58:19,146 DEBUG [Thread-1622 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x361d57a1 to 127.0.0.1:49190 2024-12-16T17:58:19,146 DEBUG [Thread-1622 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-16T17:58:19,146 DEBUG [Thread-1624 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x772c186a to 127.0.0.1:49190 2024-12-16T17:58:19,146 DEBUG [Thread-1624 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-16T17:58:19,150 DEBUG [Thread-1626 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x2c68a792 to 127.0.0.1:49190 2024-12-16T17:58:19,150 DEBUG [Thread-1626 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-16T17:58:19,192 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39733 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5e6b04d28ec6af2428036942def2f402, server=3609ad07831c,39733,1734371789085 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-16T17:58:19,192 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39733 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5e6b04d28ec6af2428036942def2f402, server=3609ad07831c,39733,1734371789085 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-16T17:58:19,192 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39733 {}] ipc.CallRunner(138): callId: 118 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:47372 deadline: 1734371959192, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5e6b04d28ec6af2428036942def2f402, server=3609ad07831c,39733,1734371789085 2024-12-16T17:58:19,192 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39733 {}] ipc.CallRunner(138): callId: 119 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:47422 deadline: 1734371959191, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5e6b04d28ec6af2428036942def2f402, server=3609ad07831c,39733,1734371789085 2024-12-16T17:58:19,196 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39733 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5e6b04d28ec6af2428036942def2f402, server=3609ad07831c,39733,1734371789085 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-16T17:58:19,196 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39733 {}] ipc.CallRunner(138): callId: 133 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:47356 deadline: 1734371959196, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5e6b04d28ec6af2428036942def2f402, server=3609ad07831c,39733,1734371789085 2024-12-16T17:58:19,392 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-2 {event_type=RS_FLUSH_REGIONS, pid=109}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-16T17:58:19,396 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39733 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5e6b04d28ec6af2428036942def2f402, server=3609ad07831c,39733,1734371789085 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-16T17:58:19,396 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39733 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5e6b04d28ec6af2428036942def2f402, server=3609ad07831c,39733,1734371789085 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-16T17:58:19,396 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39733 {}] ipc.CallRunner(138): callId: 120 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:47372 deadline: 1734371959395, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5e6b04d28ec6af2428036942def2f402, server=3609ad07831c,39733,1734371789085 2024-12-16T17:58:19,396 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39733 {}] ipc.CallRunner(138): callId: 121 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:47422 deadline: 1734371959395, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5e6b04d28ec6af2428036942def2f402, server=3609ad07831c,39733,1734371789085 2024-12-16T17:58:19,399 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39733 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5e6b04d28ec6af2428036942def2f402, server=3609ad07831c,39733,1734371789085 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-16T17:58:19,399 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39733 {}] ipc.CallRunner(138): callId: 135 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:47356 deadline: 1734371959399, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5e6b04d28ec6af2428036942def2f402, server=3609ad07831c,39733,1734371789085 2024-12-16T17:58:19,400 INFO [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-2 {event_type=RS_FLUSH_REGIONS, pid=109}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241216b3eef26c55514441b8715a4c307bcff9_5e6b04d28ec6af2428036942def2f402 to hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241216b3eef26c55514441b8715a4c307bcff9_5e6b04d28ec6af2428036942def2f402 2024-12-16T17:58:19,401 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-2 {event_type=RS_FLUSH_REGIONS, pid=109}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/5e6b04d28ec6af2428036942def2f402/.tmp/A/00e92b5563394de980e85844a8fb28df, store: [table=TestAcidGuarantees family=A region=5e6b04d28ec6af2428036942def2f402] 2024-12-16T17:58:19,401 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-2 {event_type=RS_FLUSH_REGIONS, pid=109}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/5e6b04d28ec6af2428036942def2f402/.tmp/A/00e92b5563394de980e85844a8fb28df is 175, key is test_row_0/A:col10/1734371896922/Put/seqid=0 2024-12-16T17:58:19,405 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41817 is added to blk_1073742260_1436 (size=31255) 2024-12-16T17:58:19,406 INFO [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-2 {event_type=RS_FLUSH_REGIONS, pid=109}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=291, memsize=33.5 K, hasBloomFilter=true, into tmp file hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/5e6b04d28ec6af2428036942def2f402/.tmp/A/00e92b5563394de980e85844a8fb28df 2024-12-16T17:58:19,413 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-2 {event_type=RS_FLUSH_REGIONS, pid=109}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/5e6b04d28ec6af2428036942def2f402/.tmp/B/4c3ba524ca894bf9b195552c06cd9802 is 50, key is test_row_0/B:col10/1734371896922/Put/seqid=0 2024-12-16T17:58:19,416 INFO [Block report 
processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41817 is added to blk_1073742261_1437 (size=12301) 2024-12-16T17:58:19,431 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38367 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=108 2024-12-16T17:58:19,699 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39733 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5e6b04d28ec6af2428036942def2f402, server=3609ad07831c,39733,1734371789085 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-16T17:58:19,699 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39733 {}] ipc.CallRunner(138): callId: 123 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:47422 deadline: 1734371959698, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5e6b04d28ec6af2428036942def2f402, server=3609ad07831c,39733,1734371789085 2024-12-16T17:58:19,700 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39733 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5e6b04d28ec6af2428036942def2f402, server=3609ad07831c,39733,1734371789085 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-16T17:58:19,700 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39733 {}] ipc.CallRunner(138): callId: 122 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:47372 deadline: 1734371959699, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5e6b04d28ec6af2428036942def2f402, server=3609ad07831c,39733,1734371789085 2024-12-16T17:58:19,704 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39733 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5e6b04d28ec6af2428036942def2f402, server=3609ad07831c,39733,1734371789085 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-16T17:58:19,704 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39733 {}] ipc.CallRunner(138): callId: 137 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:47356 deadline: 1734371959703, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5e6b04d28ec6af2428036942def2f402, server=3609ad07831c,39733,1734371789085 2024-12-16T17:58:19,818 INFO [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-2 {event_type=RS_FLUSH_REGIONS, pid=109}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=33.54 KB at sequenceid=291 (bloomFilter=true), to=hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/5e6b04d28ec6af2428036942def2f402/.tmp/B/4c3ba524ca894bf9b195552c06cd9802 2024-12-16T17:58:19,832 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-2 {event_type=RS_FLUSH_REGIONS, pid=109}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/5e6b04d28ec6af2428036942def2f402/.tmp/C/181be30d7d3f4d56931511326733cda6 is 50, key is test_row_0/C:col10/1734371896922/Put/seqid=0 2024-12-16T17:58:19,835 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41817 is added to blk_1073742262_1438 (size=12301) 2024-12-16T17:58:19,932 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38367 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=108 2024-12-16T17:58:20,202 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39733 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5e6b04d28ec6af2428036942def2f402, server=3609ad07831c,39733,1734371789085 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-16T17:58:20,202 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39733 {}] ipc.CallRunner(138): callId: 125 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:47422 deadline: 1734371960201, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5e6b04d28ec6af2428036942def2f402, server=3609ad07831c,39733,1734371789085 2024-12-16T17:58:20,203 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39733 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5e6b04d28ec6af2428036942def2f402, server=3609ad07831c,39733,1734371789085 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-16T17:58:20,203 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39733 {}] ipc.CallRunner(138): callId: 124 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:47372 deadline: 1734371960203, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5e6b04d28ec6af2428036942def2f402, server=3609ad07831c,39733,1734371789085 2024-12-16T17:58:20,208 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39733 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5e6b04d28ec6af2428036942def2f402, server=3609ad07831c,39733,1734371789085 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-16T17:58:20,208 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39733 {}] ipc.CallRunner(138): callId: 139 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:47356 deadline: 1734371960208, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5e6b04d28ec6af2428036942def2f402, server=3609ad07831c,39733,1734371789085 2024-12-16T17:58:20,236 INFO [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-2 {event_type=RS_FLUSH_REGIONS, pid=109}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=33.54 KB at sequenceid=291 (bloomFilter=true), to=hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/5e6b04d28ec6af2428036942def2f402/.tmp/C/181be30d7d3f4d56931511326733cda6 2024-12-16T17:58:20,245 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-2 {event_type=RS_FLUSH_REGIONS, pid=109}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/5e6b04d28ec6af2428036942def2f402/.tmp/A/00e92b5563394de980e85844a8fb28df as hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/5e6b04d28ec6af2428036942def2f402/A/00e92b5563394de980e85844a8fb28df 2024-12-16T17:58:20,251 INFO [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-2 {event_type=RS_FLUSH_REGIONS, pid=109}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/5e6b04d28ec6af2428036942def2f402/A/00e92b5563394de980e85844a8fb28df, entries=150, sequenceid=291, filesize=30.5 K 2024-12-16T17:58:20,253 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-2 {event_type=RS_FLUSH_REGIONS, pid=109}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/5e6b04d28ec6af2428036942def2f402/.tmp/B/4c3ba524ca894bf9b195552c06cd9802 as hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/5e6b04d28ec6af2428036942def2f402/B/4c3ba524ca894bf9b195552c06cd9802 2024-12-16T17:58:20,258 INFO [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-2 {event_type=RS_FLUSH_REGIONS, pid=109}] regionserver.HStore$StoreFlusherImpl(1989): Added 
hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/5e6b04d28ec6af2428036942def2f402/B/4c3ba524ca894bf9b195552c06cd9802, entries=150, sequenceid=291, filesize=12.0 K 2024-12-16T17:58:20,259 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-2 {event_type=RS_FLUSH_REGIONS, pid=109}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/5e6b04d28ec6af2428036942def2f402/.tmp/C/181be30d7d3f4d56931511326733cda6 as hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/5e6b04d28ec6af2428036942def2f402/C/181be30d7d3f4d56931511326733cda6 2024-12-16T17:58:20,264 INFO [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-2 {event_type=RS_FLUSH_REGIONS, pid=109}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/5e6b04d28ec6af2428036942def2f402/C/181be30d7d3f4d56931511326733cda6, entries=150, sequenceid=291, filesize=12.0 K 2024-12-16T17:58:20,265 INFO [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-2 {event_type=RS_FLUSH_REGIONS, pid=109}] regionserver.HRegion(3040): Finished flush of dataSize ~100.63 KB/103050, heapSize ~264.38 KB/270720, currentSize=100.63 KB/103050 for 5e6b04d28ec6af2428036942def2f402 in 1284ms, sequenceid=291, compaction requested=true 2024-12-16T17:58:20,265 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-2 {event_type=RS_FLUSH_REGIONS, pid=109}] regionserver.HRegion(2538): Flush status journal for 5e6b04d28ec6af2428036942def2f402: 2024-12-16T17:58:20,265 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-2 {event_type=RS_FLUSH_REGIONS, pid=109}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1734371876313.5e6b04d28ec6af2428036942def2f402. 
2024-12-16T17:58:20,265 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-2 {event_type=RS_FLUSH_REGIONS, pid=109}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=109 2024-12-16T17:58:20,266 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38367 {}] master.HMaster(4106): Remote procedure done, pid=109 2024-12-16T17:58:20,268 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=109, resume processing ppid=108 2024-12-16T17:58:20,268 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=109, ppid=108, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 1.4370 sec 2024-12-16T17:58:20,269 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=108, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=108, table=TestAcidGuarantees in 1.4400 sec 2024-12-16T17:58:20,934 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38367 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=108 2024-12-16T17:58:20,934 INFO [Thread-1619 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 108 completed 2024-12-16T17:58:21,207 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39733 {}] regionserver.HRegion(8581): Flush requested on 5e6b04d28ec6af2428036942def2f402 2024-12-16T17:58:21,207 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 5e6b04d28ec6af2428036942def2f402 3/3 column families, dataSize=107.34 KB heapSize=282 KB 2024-12-16T17:58:21,207 DEBUG [Thread-1609 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x098697cd to 127.0.0.1:49190 2024-12-16T17:58:21,207 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 5e6b04d28ec6af2428036942def2f402, store=A 2024-12-16T17:58:21,207 DEBUG [Thread-1609 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-16T17:58:21,207 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-16T17:58:21,207 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 5e6b04d28ec6af2428036942def2f402, store=B 2024-12-16T17:58:21,207 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-16T17:58:21,207 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 5e6b04d28ec6af2428036942def2f402, store=C 2024-12-16T17:58:21,207 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-16T17:58:21,213 DEBUG [Thread-1615 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x3dfb3ff1 to 127.0.0.1:49190 2024-12-16T17:58:21,213 DEBUG [Thread-1615 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-16T17:58:21,213 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e2024121616b21471bbcc45d4b77ed6486b9d8e64_5e6b04d28ec6af2428036942def2f402 is 50, key is test_row_0/A:col10/1734371899084/Put/seqid=0 2024-12-16T17:58:21,214 DEBUG [Thread-1611 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x2f6b3f8c to 127.0.0.1:49190 2024-12-16T17:58:21,214 DEBUG [Thread-1611 {}] ipc.AbstractRpcClient(514): Stopping rpc client 
2024-12-16T17:58:21,217 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41817 is added to blk_1073742263_1439 (size=12454) 2024-12-16T17:58:21,353 DEBUG [Thread-1617 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x0efb8efe to 127.0.0.1:49190 2024-12-16T17:58:21,354 DEBUG [Thread-1617 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-16T17:58:21,619 DEBUG [MemStoreFlusher.0 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-16T17:58:21,627 INFO [MemStoreFlusher.0 {}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e2024121616b21471bbcc45d4b77ed6486b9d8e64_5e6b04d28ec6af2428036942def2f402 to hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e2024121616b21471bbcc45d4b77ed6486b9d8e64_5e6b04d28ec6af2428036942def2f402 2024-12-16T17:58:21,629 DEBUG [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/5e6b04d28ec6af2428036942def2f402/.tmp/A/d504952db3ec4042a9e17120fd97ebee, store: [table=TestAcidGuarantees family=A region=5e6b04d28ec6af2428036942def2f402] 2024-12-16T17:58:21,630 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/5e6b04d28ec6af2428036942def2f402/.tmp/A/d504952db3ec4042a9e17120fd97ebee is 175, key is test_row_0/A:col10/1734371899084/Put/seqid=0 2024-12-16T17:58:21,636 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41817 is added to blk_1073742264_1440 (size=31255) 2024-12-16T17:58:22,038 INFO [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=310, memsize=35.8 K, hasBloomFilter=true, into tmp file hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/5e6b04d28ec6af2428036942def2f402/.tmp/A/d504952db3ec4042a9e17120fd97ebee 2024-12-16T17:58:22,049 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/5e6b04d28ec6af2428036942def2f402/.tmp/B/0b5f38f7148a41abb9d9c0626a95f882 is 50, key is test_row_0/B:col10/1734371899084/Put/seqid=0 2024-12-16T17:58:22,053 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41817 is added to blk_1073742265_1441 (size=12301) 2024-12-16T17:58:22,455 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=35.78 KB at sequenceid=310 (bloomFilter=true), to=hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/5e6b04d28ec6af2428036942def2f402/.tmp/B/0b5f38f7148a41abb9d9c0626a95f882 2024-12-16T17:58:22,467 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in 
hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/5e6b04d28ec6af2428036942def2f402/.tmp/C/72e8c3cd31c44f6d8fdd342ceb136a5e is 50, key is test_row_0/C:col10/1734371899084/Put/seqid=0 2024-12-16T17:58:22,470 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41817 is added to blk_1073742266_1442 (size=12301) 2024-12-16T17:58:22,872 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=35.78 KB at sequenceid=310 (bloomFilter=true), to=hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/5e6b04d28ec6af2428036942def2f402/.tmp/C/72e8c3cd31c44f6d8fdd342ceb136a5e 2024-12-16T17:58:22,879 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/5e6b04d28ec6af2428036942def2f402/.tmp/A/d504952db3ec4042a9e17120fd97ebee as hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/5e6b04d28ec6af2428036942def2f402/A/d504952db3ec4042a9e17120fd97ebee 2024-12-16T17:58:22,884 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/5e6b04d28ec6af2428036942def2f402/A/d504952db3ec4042a9e17120fd97ebee, entries=150, sequenceid=310, filesize=30.5 K 2024-12-16T17:58:22,884 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/5e6b04d28ec6af2428036942def2f402/.tmp/B/0b5f38f7148a41abb9d9c0626a95f882 as hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/5e6b04d28ec6af2428036942def2f402/B/0b5f38f7148a41abb9d9c0626a95f882 2024-12-16T17:58:22,888 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/5e6b04d28ec6af2428036942def2f402/B/0b5f38f7148a41abb9d9c0626a95f882, entries=150, sequenceid=310, filesize=12.0 K 2024-12-16T17:58:22,889 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/5e6b04d28ec6af2428036942def2f402/.tmp/C/72e8c3cd31c44f6d8fdd342ceb136a5e as hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/5e6b04d28ec6af2428036942def2f402/C/72e8c3cd31c44f6d8fdd342ceb136a5e 2024-12-16T17:58:22,892 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/5e6b04d28ec6af2428036942def2f402/C/72e8c3cd31c44f6d8fdd342ceb136a5e, entries=150, sequenceid=310, filesize=12.0 K 2024-12-16T17:58:22,893 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~107.34 KB/109920, heapSize ~281.95 KB/288720, currentSize=20.13 KB/20610 for 5e6b04d28ec6af2428036942def2f402 in 1686ms, sequenceid=310, compaction requested=true 2024-12-16T17:58:22,893 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush 
status journal for 5e6b04d28ec6af2428036942def2f402: 2024-12-16T17:58:22,893 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 5e6b04d28ec6af2428036942def2f402:A, priority=-2147483648, current under compaction store size is 1 2024-12-16T17:58:22,893 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-16T17:58:22,893 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 5e6b04d28ec6af2428036942def2f402:B, priority=-2147483648, current under compaction store size is 2 2024-12-16T17:58:22,893 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-16T17:58:22,893 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 5e6b04d28ec6af2428036942def2f402:C, priority=-2147483648, current under compaction store size is 3 2024-12-16T17:58:22,893 DEBUG [RS:0;3609ad07831c:39733-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 5 store files, 0 compacting, 5 eligible, 16 blocking 2024-12-16T17:58:22,893 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-16T17:58:22,893 DEBUG [RS:0;3609ad07831c:39733-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 5 store files, 0 compacting, 5 eligible, 16 blocking 2024-12-16T17:58:22,894 DEBUG [RS:0;3609ad07831c:39733-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 5 files of size 165165 starting at candidate #0 after considering 6 permutations with 6 in ratio 2024-12-16T17:58:22,894 DEBUG [RS:0;3609ad07831c:39733-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 5 files of size 61751 starting at candidate #0 after considering 6 permutations with 6 in ratio 2024-12-16T17:58:22,894 DEBUG [RS:0;3609ad07831c:39733-longCompactions-0 {}] regionserver.HStore(1540): 5e6b04d28ec6af2428036942def2f402/B is initiating minor compaction (all files) 2024-12-16T17:58:22,894 DEBUG [RS:0;3609ad07831c:39733-shortCompactions-0 {}] regionserver.HStore(1540): 5e6b04d28ec6af2428036942def2f402/A is initiating minor compaction (all files) 2024-12-16T17:58:22,895 INFO [RS:0;3609ad07831c:39733-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 5e6b04d28ec6af2428036942def2f402/A in TestAcidGuarantees,,1734371876313.5e6b04d28ec6af2428036942def2f402. 2024-12-16T17:58:22,895 INFO [RS:0;3609ad07831c:39733-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 5e6b04d28ec6af2428036942def2f402/B in TestAcidGuarantees,,1734371876313.5e6b04d28ec6af2428036942def2f402. 
2024-12-16T17:58:22,895 INFO [RS:0;3609ad07831c:39733-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/5e6b04d28ec6af2428036942def2f402/B/933988f4515349868b85d177db0515a5, hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/5e6b04d28ec6af2428036942def2f402/B/4f06ac493baa4b478591d7648b0349c5, hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/5e6b04d28ec6af2428036942def2f402/B/ea153dd6f81c442bbdd80ea6388dd6a9, hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/5e6b04d28ec6af2428036942def2f402/B/4c3ba524ca894bf9b195552c06cd9802, hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/5e6b04d28ec6af2428036942def2f402/B/0b5f38f7148a41abb9d9c0626a95f882] into tmpdir=hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/5e6b04d28ec6af2428036942def2f402/.tmp, totalSize=60.3 K 2024-12-16T17:58:22,895 INFO [RS:0;3609ad07831c:39733-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/5e6b04d28ec6af2428036942def2f402/A/836869a9272f4c4b828b47a0e869eb23, hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/5e6b04d28ec6af2428036942def2f402/A/cf254ef9fb2347d888cf32d526089f5f, hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/5e6b04d28ec6af2428036942def2f402/A/ac0c2efa641f4e418dfcf769c4c7c4a2, hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/5e6b04d28ec6af2428036942def2f402/A/00e92b5563394de980e85844a8fb28df, hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/5e6b04d28ec6af2428036942def2f402/A/d504952db3ec4042a9e17120fd97ebee] into tmpdir=hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/5e6b04d28ec6af2428036942def2f402/.tmp, totalSize=161.3 K 2024-12-16T17:58:22,895 INFO [RS:0;3609ad07831c:39733-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(181): MOB compaction: major=false isAll=true priority=11 throughput controller=DefaultCompactionThroughputController [maxThroughput=50.00 MB/second, activeCompactions=0] table=TestAcidGuarantees cf=A region=TestAcidGuarantees,,1734371876313.5e6b04d28ec6af2428036942def2f402. 2024-12-16T17:58:22,895 DEBUG [RS:0;3609ad07831c:39733-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(191): MOB compaction table=TestAcidGuarantees cf=A region=TestAcidGuarantees,,1734371876313.5e6b04d28ec6af2428036942def2f402. 
files: [hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/5e6b04d28ec6af2428036942def2f402/A/836869a9272f4c4b828b47a0e869eb23, hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/5e6b04d28ec6af2428036942def2f402/A/cf254ef9fb2347d888cf32d526089f5f, hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/5e6b04d28ec6af2428036942def2f402/A/ac0c2efa641f4e418dfcf769c4c7c4a2, hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/5e6b04d28ec6af2428036942def2f402/A/00e92b5563394de980e85844a8fb28df, hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/5e6b04d28ec6af2428036942def2f402/A/d504952db3ec4042a9e17120fd97ebee] 2024-12-16T17:58:22,895 DEBUG [RS:0;3609ad07831c:39733-longCompactions-0 {}] compactions.Compactor(224): Compacting 933988f4515349868b85d177db0515a5, keycount=150, bloomtype=ROW, size=12.4 K, encoding=NONE, compression=NONE, seqNum=234, earliestPutTs=1734371891958 2024-12-16T17:58:22,895 DEBUG [RS:0;3609ad07831c:39733-shortCompactions-0 {}] compactions.Compactor(224): Compacting 836869a9272f4c4b828b47a0e869eb23, keycount=150, bloomtype=ROW, size=30.9 K, encoding=NONE, compression=NONE, seqNum=234, earliestPutTs=1734371891958 2024-12-16T17:58:22,895 DEBUG [RS:0;3609ad07831c:39733-longCompactions-0 {}] compactions.Compactor(224): Compacting 4f06ac493baa4b478591d7648b0349c5, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=252, earliestPutTs=1734371893202 2024-12-16T17:58:22,895 DEBUG [RS:0;3609ad07831c:39733-shortCompactions-0 {}] compactions.Compactor(224): Compacting cf254ef9fb2347d888cf32d526089f5f, keycount=200, bloomtype=ROW, size=38.8 K, encoding=NONE, compression=NONE, seqNum=252, earliestPutTs=1734371893202 2024-12-16T17:58:22,895 DEBUG [RS:0;3609ad07831c:39733-longCompactions-0 {}] compactions.Compactor(224): Compacting ea153dd6f81c442bbdd80ea6388dd6a9, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=273, earliestPutTs=1734371894765 2024-12-16T17:58:22,895 DEBUG [RS:0;3609ad07831c:39733-shortCompactions-0 {}] compactions.Compactor(224): Compacting ac0c2efa641f4e418dfcf769c4c7c4a2, keycount=150, bloomtype=ROW, size=30.5 K, encoding=NONE, compression=NONE, seqNum=273, earliestPutTs=1734371894765 2024-12-16T17:58:22,895 DEBUG [RS:0;3609ad07831c:39733-longCompactions-0 {}] compactions.Compactor(224): Compacting 4c3ba524ca894bf9b195552c06cd9802, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=291, earliestPutTs=1734371896917 2024-12-16T17:58:22,896 DEBUG [RS:0;3609ad07831c:39733-longCompactions-0 {}] compactions.Compactor(224): Compacting 0b5f38f7148a41abb9d9c0626a95f882, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=310, earliestPutTs=1734371899083 2024-12-16T17:58:22,896 DEBUG [RS:0;3609ad07831c:39733-shortCompactions-0 {}] compactions.Compactor(224): Compacting 00e92b5563394de980e85844a8fb28df, keycount=150, bloomtype=ROW, size=30.5 K, encoding=NONE, compression=NONE, seqNum=291, earliestPutTs=1734371896917 2024-12-16T17:58:22,896 DEBUG [RS:0;3609ad07831c:39733-shortCompactions-0 {}] compactions.Compactor(224): Compacting d504952db3ec4042a9e17120fd97ebee, keycount=150, bloomtype=ROW, size=30.5 K, encoding=NONE, compression=NONE, seqNum=310, 
earliestPutTs=1734371899083 2024-12-16T17:58:22,903 INFO [RS:0;3609ad07831c:39733-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 5e6b04d28ec6af2428036942def2f402#B#compaction#372 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-12-16T17:58:22,904 DEBUG [RS:0;3609ad07831c:39733-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/5e6b04d28ec6af2428036942def2f402/.tmp/B/577c3a8c311b401b9cbb6a7b964b9f59 is 50, key is test_row_0/B:col10/1734371899084/Put/seqid=0 2024-12-16T17:58:22,906 INFO [RS:0;3609ad07831c:39733-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(322): Compact MOB=false optimized configured=false optimized enabled=false maximum MOB file size=1073741824 major=true store=[table=TestAcidGuarantees family=A region=5e6b04d28ec6af2428036942def2f402] 2024-12-16T17:58:22,909 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41817 is added to blk_1073742267_1443 (size=13017) 2024-12-16T17:58:22,913 DEBUG [RS:0;3609ad07831c:39733-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(626): New MOB writer created=d41d8cd98f00b204e9800998ecf8427e202412160b7bd1702b1b4be0810c043aec1942f8_5e6b04d28ec6af2428036942def2f402 store=[table=TestAcidGuarantees family=A region=5e6b04d28ec6af2428036942def2f402] 2024-12-16T17:58:22,939 DEBUG [RS:0;3609ad07831c:39733-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(647): Commit or abort size=0 mobCells=0 major=true file=d41d8cd98f00b204e9800998ecf8427e202412160b7bd1702b1b4be0810c043aec1942f8_5e6b04d28ec6af2428036942def2f402, store=[table=TestAcidGuarantees family=A region=5e6b04d28ec6af2428036942def2f402] 2024-12-16T17:58:22,939 DEBUG [RS:0;3609ad07831c:39733-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(658): Aborting writer for hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202412160b7bd1702b1b4be0810c043aec1942f8_5e6b04d28ec6af2428036942def2f402 because there are no MOB cells, store=[table=TestAcidGuarantees family=A region=5e6b04d28ec6af2428036942def2f402] 2024-12-16T17:58:22,942 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41817 is added to blk_1073742268_1444 (size=4469) 2024-12-16T17:58:23,315 DEBUG [RS:0;3609ad07831c:39733-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/5e6b04d28ec6af2428036942def2f402/.tmp/B/577c3a8c311b401b9cbb6a7b964b9f59 as hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/5e6b04d28ec6af2428036942def2f402/B/577c3a8c311b401b9cbb6a7b964b9f59 2024-12-16T17:58:23,321 INFO [RS:0;3609ad07831c:39733-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 5 (all) file(s) in 5e6b04d28ec6af2428036942def2f402/B of 5e6b04d28ec6af2428036942def2f402 into 577c3a8c311b401b9cbb6a7b964b9f59(size=12.7 K), total size for store is 12.7 K. This selection was in queue for 0sec, and took 0sec to execute. 
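The "Exploring compaction algorithm has selected 5 files ... with 6 in ratio" lines above refer to HBase's size-ratio check on compaction candidates. The snippet below is a deliberately simplified sketch of that idea, not the ExploringCompactionPolicy source; the file sizes and the 1.2 ratio are assumed example values chosen only to resemble the five ~12 K B-family files selected above.

    import java.util.List;

    public class RatioSelectionSketch {

        /** True if no file is larger than ratio times the combined size of the other files. */
        static boolean withinRatio(List<Long> fileSizes, double ratio) {
            long total = 0L;
            for (long size : fileSizes) {
                total += size;
            }
            for (long size : fileSizes) {
                if (size > ratio * (total - size)) {
                    return false;
                }
            }
            return true;
        }

        public static void main(String[] args) {
            // Assumed example sizes in bytes, roughly matching five ~12 K store files.
            List<Long> sizes = List.of(12700L, 12200L, 12300L, 12300L, 12300L);
            System.out.println(withinRatio(sizes, 1.2)); // 1.2 is an assumed ratio value
        }
    }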
2024-12-16T17:58:23,321 DEBUG [RS:0;3609ad07831c:39733-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 5e6b04d28ec6af2428036942def2f402: 2024-12-16T17:58:23,321 INFO [RS:0;3609ad07831c:39733-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1734371876313.5e6b04d28ec6af2428036942def2f402., storeName=5e6b04d28ec6af2428036942def2f402/B, priority=11, startTime=1734371902893; duration=0sec 2024-12-16T17:58:23,321 DEBUG [RS:0;3609ad07831c:39733-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-16T17:58:23,321 DEBUG [RS:0;3609ad07831c:39733-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 5e6b04d28ec6af2428036942def2f402:B 2024-12-16T17:58:23,321 DEBUG [RS:0;3609ad07831c:39733-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 5 store files, 0 compacting, 5 eligible, 16 blocking 2024-12-16T17:58:23,323 DEBUG [RS:0;3609ad07831c:39733-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 5 files of size 61751 starting at candidate #0 after considering 6 permutations with 6 in ratio 2024-12-16T17:58:23,323 DEBUG [RS:0;3609ad07831c:39733-longCompactions-0 {}] regionserver.HStore(1540): 5e6b04d28ec6af2428036942def2f402/C is initiating minor compaction (all files) 2024-12-16T17:58:23,323 INFO [RS:0;3609ad07831c:39733-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 5e6b04d28ec6af2428036942def2f402/C in TestAcidGuarantees,,1734371876313.5e6b04d28ec6af2428036942def2f402. 2024-12-16T17:58:23,324 INFO [RS:0;3609ad07831c:39733-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/5e6b04d28ec6af2428036942def2f402/C/d29adca1b2a445efaa29229d700d50f4, hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/5e6b04d28ec6af2428036942def2f402/C/990249a6898e40edbafdbe15e541a4b5, hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/5e6b04d28ec6af2428036942def2f402/C/dce32d1d80984287ac14792b34596bb3, hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/5e6b04d28ec6af2428036942def2f402/C/181be30d7d3f4d56931511326733cda6, hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/5e6b04d28ec6af2428036942def2f402/C/72e8c3cd31c44f6d8fdd342ceb136a5e] into tmpdir=hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/5e6b04d28ec6af2428036942def2f402/.tmp, totalSize=60.3 K 2024-12-16T17:58:23,324 DEBUG [RS:0;3609ad07831c:39733-longCompactions-0 {}] compactions.Compactor(224): Compacting d29adca1b2a445efaa29229d700d50f4, keycount=150, bloomtype=ROW, size=12.4 K, encoding=NONE, compression=NONE, seqNum=234, earliestPutTs=1734371891958 2024-12-16T17:58:23,325 DEBUG [RS:0;3609ad07831c:39733-longCompactions-0 {}] compactions.Compactor(224): Compacting 990249a6898e40edbafdbe15e541a4b5, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=252, earliestPutTs=1734371893202 2024-12-16T17:58:23,325 DEBUG 
[RS:0;3609ad07831c:39733-longCompactions-0 {}] compactions.Compactor(224): Compacting dce32d1d80984287ac14792b34596bb3, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=273, earliestPutTs=1734371894765 2024-12-16T17:58:23,326 DEBUG [RS:0;3609ad07831c:39733-longCompactions-0 {}] compactions.Compactor(224): Compacting 181be30d7d3f4d56931511326733cda6, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=291, earliestPutTs=1734371896917 2024-12-16T17:58:23,326 DEBUG [RS:0;3609ad07831c:39733-longCompactions-0 {}] compactions.Compactor(224): Compacting 72e8c3cd31c44f6d8fdd342ceb136a5e, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=310, earliestPutTs=1734371899083 2024-12-16T17:58:23,342 INFO [RS:0;3609ad07831c:39733-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 5e6b04d28ec6af2428036942def2f402#C#compaction#374 average throughput is 1.64 MB/second, slept 0 time(s) and total slept time is 0 ms. 1 active operations remaining, total limit is 50.00 MB/second 2024-12-16T17:58:23,342 DEBUG [RS:0;3609ad07831c:39733-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/5e6b04d28ec6af2428036942def2f402/.tmp/C/e05163d8e8d84410a5bb7c6fe601feba is 50, key is test_row_0/C:col10/1734371899084/Put/seqid=0 2024-12-16T17:58:23,343 INFO [RS:0;3609ad07831c:39733-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 5e6b04d28ec6af2428036942def2f402#A#compaction#373 average throughput is 0.06 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-12-16T17:58:23,344 DEBUG [RS:0;3609ad07831c:39733-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/5e6b04d28ec6af2428036942def2f402/.tmp/A/9e4b7c8ccc4b4524b07001cbe6d43d9c is 175, key is test_row_0/A:col10/1734371899084/Put/seqid=0 2024-12-16T17:58:23,345 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41817 is added to blk_1073742269_1445 (size=13017) 2024-12-16T17:58:23,347 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41817 is added to blk_1073742270_1446 (size=31971) 2024-12-16T17:58:23,753 DEBUG [RS:0;3609ad07831c:39733-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/5e6b04d28ec6af2428036942def2f402/.tmp/C/e05163d8e8d84410a5bb7c6fe601feba as hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/5e6b04d28ec6af2428036942def2f402/C/e05163d8e8d84410a5bb7c6fe601feba 2024-12-16T17:58:23,753 DEBUG [RS:0;3609ad07831c:39733-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/5e6b04d28ec6af2428036942def2f402/.tmp/A/9e4b7c8ccc4b4524b07001cbe6d43d9c as hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/5e6b04d28ec6af2428036942def2f402/A/9e4b7c8ccc4b4524b07001cbe6d43d9c 2024-12-16T17:58:23,759 INFO 
[RS:0;3609ad07831c:39733-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 5 (all) file(s) in 5e6b04d28ec6af2428036942def2f402/C of 5e6b04d28ec6af2428036942def2f402 into e05163d8e8d84410a5bb7c6fe601feba(size=12.7 K), total size for store is 12.7 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-12-16T17:58:23,759 INFO [RS:0;3609ad07831c:39733-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 5 (all) file(s) in 5e6b04d28ec6af2428036942def2f402/A of 5e6b04d28ec6af2428036942def2f402 into 9e4b7c8ccc4b4524b07001cbe6d43d9c(size=31.2 K), total size for store is 31.2 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-12-16T17:58:23,759 DEBUG [RS:0;3609ad07831c:39733-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 5e6b04d28ec6af2428036942def2f402: 2024-12-16T17:58:23,759 DEBUG [RS:0;3609ad07831c:39733-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 5e6b04d28ec6af2428036942def2f402: 2024-12-16T17:58:23,759 INFO [RS:0;3609ad07831c:39733-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1734371876313.5e6b04d28ec6af2428036942def2f402., storeName=5e6b04d28ec6af2428036942def2f402/C, priority=11, startTime=1734371902893; duration=0sec 2024-12-16T17:58:23,759 INFO [RS:0;3609ad07831c:39733-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1734371876313.5e6b04d28ec6af2428036942def2f402., storeName=5e6b04d28ec6af2428036942def2f402/A, priority=11, startTime=1734371902893; duration=0sec 2024-12-16T17:58:23,759 DEBUG [RS:0;3609ad07831c:39733-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-16T17:58:23,759 DEBUG [RS:0;3609ad07831c:39733-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 5e6b04d28ec6af2428036942def2f402:A 2024-12-16T17:58:23,759 DEBUG [RS:0;3609ad07831c:39733-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-16T17:58:23,759 DEBUG [RS:0;3609ad07831c:39733-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 5e6b04d28ec6af2428036942def2f402:C 2024-12-16T17:58:24,818 DEBUG [Thread-1613 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x63a1fd83 to 127.0.0.1:49190 2024-12-16T17:58:24,818 DEBUG [Thread-1613 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-16T17:58:24,818 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(392): Finished test. 
Writers: 2024-12-16T17:58:24,819 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(394): wrote 38 2024-12-16T17:58:24,819 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(394): wrote 37 2024-12-16T17:58:24,819 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(394): wrote 29 2024-12-16T17:58:24,819 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(394): wrote 52 2024-12-16T17:58:24,819 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(394): wrote 91 2024-12-16T17:58:24,819 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(396): Readers: 2024-12-16T17:58:24,819 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(400): Scanners: 2024-12-16T17:58:24,819 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(402): scanned 3137 2024-12-16T17:58:24,819 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(403): verified 9411 rows 2024-12-16T17:58:24,819 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(402): scanned 3122 2024-12-16T17:58:24,819 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(403): verified 9366 rows 2024-12-16T17:58:24,819 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(402): scanned 3136 2024-12-16T17:58:24,819 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(403): verified 9408 rows 2024-12-16T17:58:24,819 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(402): scanned 3154 2024-12-16T17:58:24,819 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(403): verified 9462 rows 2024-12-16T17:58:24,819 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(402): scanned 3129 2024-12-16T17:58:24,819 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(403): verified 9387 rows 2024-12-16T17:58:24,819 INFO [Time-limited test {}] client.ConnectionImplementation(2127): Closing master protocol: MasterService 2024-12-16T17:58:24,820 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x7c663007 to 127.0.0.1:49190 2024-12-16T17:58:24,820 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-16T17:58:24,823 INFO [Time-limited test {}] client.HBaseAdmin$18(967): Started disable of TestAcidGuarantees 2024-12-16T17:58:24,823 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38367 {}] master.HMaster$13(2755): Client=jenkins//172.17.0.2 disable TestAcidGuarantees 2024-12-16T17:58:24,824 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38367 {}] procedure2.ProcedureExecutor(1098): Stored pid=110, state=RUNNABLE:DISABLE_TABLE_PREPARE; DisableTableProcedure table=TestAcidGuarantees 2024-12-16T17:58:24,826 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38367 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=110 2024-12-16T17:58:24,826 DEBUG [PEWorker-4 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":1,"row":"TestAcidGuarantees","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1734371904826"}]},"ts":"1734371904826"} 2024-12-16T17:58:24,827 INFO [PEWorker-4 {}] hbase.MetaTableAccessor(1655): Updated tableName=TestAcidGuarantees, state=DISABLING in hbase:meta 2024-12-16T17:58:24,878 INFO [PEWorker-4 {}] procedure.DisableTableProcedure(284): Set TestAcidGuarantees to state=DISABLING 2024-12-16T17:58:24,880 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=111, ppid=110, state=RUNNABLE:CLOSE_TABLE_REGIONS_SCHEDULE; CloseTableRegionsProcedure table=TestAcidGuarantees}] 2024-12-16T17:58:24,882 INFO [PEWorker-2 {}] 
procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=112, ppid=111, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE; TransitRegionStateProcedure table=TestAcidGuarantees, region=5e6b04d28ec6af2428036942def2f402, UNASSIGN}] 2024-12-16T17:58:24,884 INFO [PEWorker-5 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=112, ppid=111, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE; TransitRegionStateProcedure table=TestAcidGuarantees, region=5e6b04d28ec6af2428036942def2f402, UNASSIGN 2024-12-16T17:58:24,885 INFO [PEWorker-5 {}] assignment.RegionStateStore(202): pid=112 updating hbase:meta row=5e6b04d28ec6af2428036942def2f402, regionState=CLOSING, regionLocation=3609ad07831c,39733,1734371789085 2024-12-16T17:58:24,887 DEBUG [PEWorker-5 {}] assignment.TransitRegionStateProcedure(338): Close region: isSplit: false: evictOnSplit: true: evictOnClose: false 2024-12-16T17:58:24,887 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=113, ppid=112, state=RUNNABLE; CloseRegionProcedure 5e6b04d28ec6af2428036942def2f402, server=3609ad07831c,39733,1734371789085}] 2024-12-16T17:58:24,927 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38367 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=110 2024-12-16T17:58:25,040 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 3609ad07831c,39733,1734371789085 2024-12-16T17:58:25,040 INFO [RS_CLOSE_REGION-regionserver/3609ad07831c:0-0 {event_type=M_RS_CLOSE_REGION, pid=113}] handler.UnassignRegionHandler(124): Close 5e6b04d28ec6af2428036942def2f402 2024-12-16T17:58:25,040 DEBUG [RS_CLOSE_REGION-regionserver/3609ad07831c:0-0 {event_type=M_RS_CLOSE_REGION, pid=113}] handler.UnassignRegionHandler(138): Unassign region: split region: false: evictCache: false 2024-12-16T17:58:25,041 DEBUG [RS_CLOSE_REGION-regionserver/3609ad07831c:0-0 {event_type=M_RS_CLOSE_REGION, pid=113}] regionserver.HRegion(1681): Closing 5e6b04d28ec6af2428036942def2f402, disabling compactions & flushes 2024-12-16T17:58:25,041 INFO [RS_CLOSE_REGION-regionserver/3609ad07831c:0-0 {event_type=M_RS_CLOSE_REGION, pid=113}] regionserver.HRegion(1703): Closing region TestAcidGuarantees,,1734371876313.5e6b04d28ec6af2428036942def2f402. 2024-12-16T17:58:25,041 DEBUG [RS_CLOSE_REGION-regionserver/3609ad07831c:0-0 {event_type=M_RS_CLOSE_REGION, pid=113}] regionserver.HRegion(1724): Waiting without time limit for close lock on TestAcidGuarantees,,1734371876313.5e6b04d28ec6af2428036942def2f402. 2024-12-16T17:58:25,041 DEBUG [RS_CLOSE_REGION-regionserver/3609ad07831c:0-0 {event_type=M_RS_CLOSE_REGION, pid=113}] regionserver.HRegion(1791): Acquired close lock on TestAcidGuarantees,,1734371876313.5e6b04d28ec6af2428036942def2f402. after waiting 0 ms 2024-12-16T17:58:25,041 DEBUG [RS_CLOSE_REGION-regionserver/3609ad07831c:0-0 {event_type=M_RS_CLOSE_REGION, pid=113}] regionserver.HRegion(1801): Updates disabled for region TestAcidGuarantees,,1734371876313.5e6b04d28ec6af2428036942def2f402. 
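The DisableTableProcedure and CloseRegionProcedure entries above are the master-side half of a client request to disable the table. For orientation, a minimal client-side sketch using the public Admin API follows; it is not taken from AcidGuaranteesTestTool, and the default configuration lookup is an assumption.

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;

    public class DisableTableSketch {
        public static void main(String[] args) throws Exception {
            Configuration conf = HBaseConfiguration.create(); // assumed default client config
            TableName table = TableName.valueOf("TestAcidGuarantees");
            try (Connection conn = ConnectionFactory.createConnection(conf);
                 Admin admin = conn.getAdmin()) {
                if (admin.tableExists(table) && admin.isTableEnabled(table)) {
                    admin.disableTable(table); // blocks until the disable procedure finishes
                }
                System.out.println("disabled: " + admin.isTableDisabled(table));
            }
        }
    }

The blocking behaviour of disableTable is consistent with the repeated "Checking to see if procedure is done pid=110" entries in the log above.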
2024-12-16T17:58:25,041 INFO [RS_CLOSE_REGION-regionserver/3609ad07831c:0-0 {event_type=M_RS_CLOSE_REGION, pid=113}] regionserver.HRegion(2837): Flushing 5e6b04d28ec6af2428036942def2f402 3/3 column families, dataSize=26.84 KB heapSize=71.06 KB 2024-12-16T17:58:25,041 DEBUG [RS_CLOSE_REGION-regionserver/3609ad07831c:0-0 {event_type=M_RS_CLOSE_REGION, pid=113}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 5e6b04d28ec6af2428036942def2f402, store=A 2024-12-16T17:58:25,041 DEBUG [RS_CLOSE_REGION-regionserver/3609ad07831c:0-0 {event_type=M_RS_CLOSE_REGION, pid=113}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-16T17:58:25,041 DEBUG [RS_CLOSE_REGION-regionserver/3609ad07831c:0-0 {event_type=M_RS_CLOSE_REGION, pid=113}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 5e6b04d28ec6af2428036942def2f402, store=B 2024-12-16T17:58:25,041 DEBUG [RS_CLOSE_REGION-regionserver/3609ad07831c:0-0 {event_type=M_RS_CLOSE_REGION, pid=113}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-16T17:58:25,041 DEBUG [RS_CLOSE_REGION-regionserver/3609ad07831c:0-0 {event_type=M_RS_CLOSE_REGION, pid=113}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 5e6b04d28ec6af2428036942def2f402, store=C 2024-12-16T17:58:25,041 DEBUG [RS_CLOSE_REGION-regionserver/3609ad07831c:0-0 {event_type=M_RS_CLOSE_REGION, pid=113}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-16T17:58:25,046 DEBUG [RS_CLOSE_REGION-regionserver/3609ad07831c:0-0 {event_type=M_RS_CLOSE_REGION, pid=113}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e2024121686daacd76f69416d9a94f424c18f4c77_5e6b04d28ec6af2428036942def2f402 is 50, key is test_row_0/A:col10/1734371901352/Put/seqid=0 2024-12-16T17:58:25,049 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41817 is added to blk_1073742271_1447 (size=9914) 2024-12-16T17:58:25,128 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38367 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=110 2024-12-16T17:58:25,430 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38367 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=110 2024-12-16T17:58:25,451 DEBUG [RS_CLOSE_REGION-regionserver/3609ad07831c:0-0 {event_type=M_RS_CLOSE_REGION, pid=113}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-16T17:58:25,459 INFO [RS_CLOSE_REGION-regionserver/3609ad07831c:0-0 {event_type=M_RS_CLOSE_REGION, pid=113}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e2024121686daacd76f69416d9a94f424c18f4c77_5e6b04d28ec6af2428036942def2f402 to hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e2024121686daacd76f69416d9a94f424c18f4c77_5e6b04d28ec6af2428036942def2f402 2024-12-16T17:58:25,461 DEBUG [RS_CLOSE_REGION-regionserver/3609ad07831c:0-0 {event_type=M_RS_CLOSE_REGION, pid=113}] mob.DefaultMobStoreFlusher(263): Flush store 
file: hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/5e6b04d28ec6af2428036942def2f402/.tmp/A/abee30777cc54010a7874f31940a9f29, store: [table=TestAcidGuarantees family=A region=5e6b04d28ec6af2428036942def2f402] 2024-12-16T17:58:25,461 DEBUG [RS_CLOSE_REGION-regionserver/3609ad07831c:0-0 {event_type=M_RS_CLOSE_REGION, pid=113}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/5e6b04d28ec6af2428036942def2f402/.tmp/A/abee30777cc54010a7874f31940a9f29 is 175, key is test_row_0/A:col10/1734371901352/Put/seqid=0 2024-12-16T17:58:25,465 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41817 is added to blk_1073742272_1448 (size=22561) 2024-12-16T17:58:25,866 INFO [RS_CLOSE_REGION-regionserver/3609ad07831c:0-0 {event_type=M_RS_CLOSE_REGION, pid=113}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=320, memsize=8.9 K, hasBloomFilter=true, into tmp file hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/5e6b04d28ec6af2428036942def2f402/.tmp/A/abee30777cc54010a7874f31940a9f29 2024-12-16T17:58:25,880 DEBUG [RS_CLOSE_REGION-regionserver/3609ad07831c:0-0 {event_type=M_RS_CLOSE_REGION, pid=113}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/5e6b04d28ec6af2428036942def2f402/.tmp/B/2fb56c37a174425fb2d6bb46ce28425b is 50, key is test_row_0/B:col10/1734371901352/Put/seqid=0 2024-12-16T17:58:25,883 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41817 is added to blk_1073742273_1449 (size=9857) 2024-12-16T17:58:25,932 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38367 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=110 2024-12-16T17:58:26,291 INFO [RS_CLOSE_REGION-regionserver/3609ad07831c:0-0 {event_type=M_RS_CLOSE_REGION, pid=113}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=8.95 KB at sequenceid=320 (bloomFilter=true), to=hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/5e6b04d28ec6af2428036942def2f402/.tmp/B/2fb56c37a174425fb2d6bb46ce28425b 2024-12-16T17:58:26,303 DEBUG [RS_CLOSE_REGION-regionserver/3609ad07831c:0-0 {event_type=M_RS_CLOSE_REGION, pid=113}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/5e6b04d28ec6af2428036942def2f402/.tmp/C/6137baad36144928acffc4aaac259105 is 50, key is test_row_0/C:col10/1734371901352/Put/seqid=0 2024-12-16T17:58:26,306 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41817 is added to blk_1073742274_1450 (size=9857) 2024-12-16T17:58:26,708 INFO [RS_CLOSE_REGION-regionserver/3609ad07831c:0-0 {event_type=M_RS_CLOSE_REGION, pid=113}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=8.95 KB at sequenceid=320 (bloomFilter=true), to=hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/5e6b04d28ec6af2428036942def2f402/.tmp/C/6137baad36144928acffc4aaac259105 2024-12-16T17:58:26,718 DEBUG 
[RS_CLOSE_REGION-regionserver/3609ad07831c:0-0 {event_type=M_RS_CLOSE_REGION, pid=113}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/5e6b04d28ec6af2428036942def2f402/.tmp/A/abee30777cc54010a7874f31940a9f29 as hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/5e6b04d28ec6af2428036942def2f402/A/abee30777cc54010a7874f31940a9f29 2024-12-16T17:58:26,723 INFO [RS_CLOSE_REGION-regionserver/3609ad07831c:0-0 {event_type=M_RS_CLOSE_REGION, pid=113}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/5e6b04d28ec6af2428036942def2f402/A/abee30777cc54010a7874f31940a9f29, entries=100, sequenceid=320, filesize=22.0 K 2024-12-16T17:58:26,724 DEBUG [RS_CLOSE_REGION-regionserver/3609ad07831c:0-0 {event_type=M_RS_CLOSE_REGION, pid=113}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/5e6b04d28ec6af2428036942def2f402/.tmp/B/2fb56c37a174425fb2d6bb46ce28425b as hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/5e6b04d28ec6af2428036942def2f402/B/2fb56c37a174425fb2d6bb46ce28425b 2024-12-16T17:58:26,728 INFO [RS_CLOSE_REGION-regionserver/3609ad07831c:0-0 {event_type=M_RS_CLOSE_REGION, pid=113}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/5e6b04d28ec6af2428036942def2f402/B/2fb56c37a174425fb2d6bb46ce28425b, entries=100, sequenceid=320, filesize=9.6 K 2024-12-16T17:58:26,729 DEBUG [RS_CLOSE_REGION-regionserver/3609ad07831c:0-0 {event_type=M_RS_CLOSE_REGION, pid=113}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/5e6b04d28ec6af2428036942def2f402/.tmp/C/6137baad36144928acffc4aaac259105 as hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/5e6b04d28ec6af2428036942def2f402/C/6137baad36144928acffc4aaac259105 2024-12-16T17:58:26,733 INFO [RS_CLOSE_REGION-regionserver/3609ad07831c:0-0 {event_type=M_RS_CLOSE_REGION, pid=113}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/5e6b04d28ec6af2428036942def2f402/C/6137baad36144928acffc4aaac259105, entries=100, sequenceid=320, filesize=9.6 K 2024-12-16T17:58:26,734 INFO [RS_CLOSE_REGION-regionserver/3609ad07831c:0-0 {event_type=M_RS_CLOSE_REGION, pid=113}] regionserver.HRegion(3040): Finished flush of dataSize ~26.84 KB/27480, heapSize ~71.02 KB/72720, currentSize=0 B/0 for 5e6b04d28ec6af2428036942def2f402 in 1693ms, sequenceid=320, compaction requested=false 2024-12-16T17:58:26,735 DEBUG [StoreCloser-TestAcidGuarantees,,1734371876313.5e6b04d28ec6af2428036942def2f402.-1 {}] regionserver.HStore(2316): Moving the files [hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/5e6b04d28ec6af2428036942def2f402/A/4d537f7d32ca4a95982b66a8a2c30b1b, 
hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/5e6b04d28ec6af2428036942def2f402/A/38300227c1724ad59d9680ea2858ef85, hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/5e6b04d28ec6af2428036942def2f402/A/5fc6f2d804d94ef6b05d6b8ca71339a1, hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/5e6b04d28ec6af2428036942def2f402/A/8a504e4f410d4bdfb5b22a41788b9749, hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/5e6b04d28ec6af2428036942def2f402/A/fc427cf7f423422c88079bf6f96747aa, hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/5e6b04d28ec6af2428036942def2f402/A/f0adba6462a54448bbcac5657e7e2978, hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/5e6b04d28ec6af2428036942def2f402/A/57a5450f19e440f7aa07d7722a61620d, hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/5e6b04d28ec6af2428036942def2f402/A/fac681912eeb4b139f3a8776ee5e61dd, hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/5e6b04d28ec6af2428036942def2f402/A/312b6669a37549bf8d85ee4c7c8b57da, hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/5e6b04d28ec6af2428036942def2f402/A/c47d3a1292c845c790b0719bf3c8aeb5, hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/5e6b04d28ec6af2428036942def2f402/A/fcbe8a2e0593421ea0af3408a6114fea, hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/5e6b04d28ec6af2428036942def2f402/A/a6c965499bc448c5b211ee6f00a40ec9, hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/5e6b04d28ec6af2428036942def2f402/A/f87d41a2049246ce8cdabe8b68011780, hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/5e6b04d28ec6af2428036942def2f402/A/983d516f234a4226af57f9ae117cd5f2, hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/5e6b04d28ec6af2428036942def2f402/A/d3bf8291f8e54edeb58d279704020c3f, hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/5e6b04d28ec6af2428036942def2f402/A/52e2be065caf415097791716075815b1, hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/5e6b04d28ec6af2428036942def2f402/A/836869a9272f4c4b828b47a0e869eb23, hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/5e6b04d28ec6af2428036942def2f402/A/cf254ef9fb2347d888cf32d526089f5f, hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/5e6b04d28ec6af2428036942def2f402/A/ac0c2efa641f4e418dfcf769c4c7c4a2, hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/5e6b04d28ec6af2428036942def2f402/A/00e92b5563394de980e85844a8fb28df, 
hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/5e6b04d28ec6af2428036942def2f402/A/d504952db3ec4042a9e17120fd97ebee] to archive 2024-12-16T17:58:26,736 DEBUG [StoreCloser-TestAcidGuarantees,,1734371876313.5e6b04d28ec6af2428036942def2f402.-1 {}] backup.HFileArchiver(363): Archiving compacted files. 2024-12-16T17:58:26,738 DEBUG [HFileArchiver-9 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/5e6b04d28ec6af2428036942def2f402/A/4d537f7d32ca4a95982b66a8a2c30b1b to hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/archive/data/default/TestAcidGuarantees/5e6b04d28ec6af2428036942def2f402/A/4d537f7d32ca4a95982b66a8a2c30b1b 2024-12-16T17:58:26,738 DEBUG [HFileArchiver-10 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/5e6b04d28ec6af2428036942def2f402/A/38300227c1724ad59d9680ea2858ef85 to hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/archive/data/default/TestAcidGuarantees/5e6b04d28ec6af2428036942def2f402/A/38300227c1724ad59d9680ea2858ef85 2024-12-16T17:58:26,738 DEBUG [HFileArchiver-11 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/5e6b04d28ec6af2428036942def2f402/A/5fc6f2d804d94ef6b05d6b8ca71339a1 to hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/archive/data/default/TestAcidGuarantees/5e6b04d28ec6af2428036942def2f402/A/5fc6f2d804d94ef6b05d6b8ca71339a1 2024-12-16T17:58:26,742 DEBUG [HFileArchiver-14 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/5e6b04d28ec6af2428036942def2f402/A/f0adba6462a54448bbcac5657e7e2978 to hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/archive/data/default/TestAcidGuarantees/5e6b04d28ec6af2428036942def2f402/A/f0adba6462a54448bbcac5657e7e2978 2024-12-16T17:58:26,742 DEBUG [HFileArchiver-13 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/5e6b04d28ec6af2428036942def2f402/A/fc427cf7f423422c88079bf6f96747aa to hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/archive/data/default/TestAcidGuarantees/5e6b04d28ec6af2428036942def2f402/A/fc427cf7f423422c88079bf6f96747aa 2024-12-16T17:58:26,742 DEBUG [HFileArchiver-12 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/5e6b04d28ec6af2428036942def2f402/A/8a504e4f410d4bdfb5b22a41788b9749 to hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/archive/data/default/TestAcidGuarantees/5e6b04d28ec6af2428036942def2f402/A/8a504e4f410d4bdfb5b22a41788b9749 2024-12-16T17:58:26,742 DEBUG [HFileArchiver-16 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, 
hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/5e6b04d28ec6af2428036942def2f402/A/fac681912eeb4b139f3a8776ee5e61dd to hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/archive/data/default/TestAcidGuarantees/5e6b04d28ec6af2428036942def2f402/A/fac681912eeb4b139f3a8776ee5e61dd 2024-12-16T17:58:26,742 DEBUG [HFileArchiver-11 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/5e6b04d28ec6af2428036942def2f402/A/fcbe8a2e0593421ea0af3408a6114fea to hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/archive/data/default/TestAcidGuarantees/5e6b04d28ec6af2428036942def2f402/A/fcbe8a2e0593421ea0af3408a6114fea 2024-12-16T17:58:26,742 DEBUG [HFileArchiver-15 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/5e6b04d28ec6af2428036942def2f402/A/57a5450f19e440f7aa07d7722a61620d to hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/archive/data/default/TestAcidGuarantees/5e6b04d28ec6af2428036942def2f402/A/57a5450f19e440f7aa07d7722a61620d 2024-12-16T17:58:26,742 DEBUG [HFileArchiver-10 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/5e6b04d28ec6af2428036942def2f402/A/c47d3a1292c845c790b0719bf3c8aeb5 to hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/archive/data/default/TestAcidGuarantees/5e6b04d28ec6af2428036942def2f402/A/c47d3a1292c845c790b0719bf3c8aeb5 2024-12-16T17:58:26,742 DEBUG [HFileArchiver-9 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/5e6b04d28ec6af2428036942def2f402/A/312b6669a37549bf8d85ee4c7c8b57da to hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/archive/data/default/TestAcidGuarantees/5e6b04d28ec6af2428036942def2f402/A/312b6669a37549bf8d85ee4c7c8b57da 2024-12-16T17:58:26,743 DEBUG [HFileArchiver-14 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/5e6b04d28ec6af2428036942def2f402/A/a6c965499bc448c5b211ee6f00a40ec9 to hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/archive/data/default/TestAcidGuarantees/5e6b04d28ec6af2428036942def2f402/A/a6c965499bc448c5b211ee6f00a40ec9 2024-12-16T17:58:26,743 DEBUG [HFileArchiver-12 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/5e6b04d28ec6af2428036942def2f402/A/983d516f234a4226af57f9ae117cd5f2 to hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/archive/data/default/TestAcidGuarantees/5e6b04d28ec6af2428036942def2f402/A/983d516f234a4226af57f9ae117cd5f2 2024-12-16T17:58:26,743 DEBUG [HFileArchiver-13 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, 
hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/5e6b04d28ec6af2428036942def2f402/A/f87d41a2049246ce8cdabe8b68011780 to hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/archive/data/default/TestAcidGuarantees/5e6b04d28ec6af2428036942def2f402/A/f87d41a2049246ce8cdabe8b68011780 2024-12-16T17:58:26,744 DEBUG [HFileArchiver-16 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/5e6b04d28ec6af2428036942def2f402/A/d3bf8291f8e54edeb58d279704020c3f to hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/archive/data/default/TestAcidGuarantees/5e6b04d28ec6af2428036942def2f402/A/d3bf8291f8e54edeb58d279704020c3f 2024-12-16T17:58:26,744 DEBUG [HFileArchiver-15 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/5e6b04d28ec6af2428036942def2f402/A/836869a9272f4c4b828b47a0e869eb23 to hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/archive/data/default/TestAcidGuarantees/5e6b04d28ec6af2428036942def2f402/A/836869a9272f4c4b828b47a0e869eb23 2024-12-16T17:58:26,744 DEBUG [HFileArchiver-9 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/5e6b04d28ec6af2428036942def2f402/A/ac0c2efa641f4e418dfcf769c4c7c4a2 to hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/archive/data/default/TestAcidGuarantees/5e6b04d28ec6af2428036942def2f402/A/ac0c2efa641f4e418dfcf769c4c7c4a2 2024-12-16T17:58:26,744 DEBUG [HFileArchiver-11 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/5e6b04d28ec6af2428036942def2f402/A/52e2be065caf415097791716075815b1 to hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/archive/data/default/TestAcidGuarantees/5e6b04d28ec6af2428036942def2f402/A/52e2be065caf415097791716075815b1 2024-12-16T17:58:26,744 DEBUG [HFileArchiver-10 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/5e6b04d28ec6af2428036942def2f402/A/cf254ef9fb2347d888cf32d526089f5f to hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/archive/data/default/TestAcidGuarantees/5e6b04d28ec6af2428036942def2f402/A/cf254ef9fb2347d888cf32d526089f5f 2024-12-16T17:58:26,745 DEBUG [HFileArchiver-12 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/5e6b04d28ec6af2428036942def2f402/A/d504952db3ec4042a9e17120fd97ebee to hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/archive/data/default/TestAcidGuarantees/5e6b04d28ec6af2428036942def2f402/A/d504952db3ec4042a9e17120fd97ebee 2024-12-16T17:58:26,745 DEBUG [HFileArchiver-14 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, 
hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/5e6b04d28ec6af2428036942def2f402/A/00e92b5563394de980e85844a8fb28df to hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/archive/data/default/TestAcidGuarantees/5e6b04d28ec6af2428036942def2f402/A/00e92b5563394de980e85844a8fb28df 2024-12-16T17:58:26,748 DEBUG [StoreCloser-TestAcidGuarantees,,1734371876313.5e6b04d28ec6af2428036942def2f402.-1 {}] regionserver.HStore(2316): Moving the files [hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/5e6b04d28ec6af2428036942def2f402/B/2e8420cf5f2f4939a168ac84e01b764f, hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/5e6b04d28ec6af2428036942def2f402/B/8a2baffa7ac4499bb68475f5e98c5d84, hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/5e6b04d28ec6af2428036942def2f402/B/cdc5532d91be4200bd2896affeee65bf, hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/5e6b04d28ec6af2428036942def2f402/B/60be5871b2414931a931a63f37183210, hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/5e6b04d28ec6af2428036942def2f402/B/71ffb2b20f344c458b9bc7222ab20c63, hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/5e6b04d28ec6af2428036942def2f402/B/8d47a7a801f5421d8007a817bf4dae42, hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/5e6b04d28ec6af2428036942def2f402/B/056a3964759f478f85f04a0919436ac4, hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/5e6b04d28ec6af2428036942def2f402/B/03f75cd228fd4363950b2e374c40e59b, hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/5e6b04d28ec6af2428036942def2f402/B/6fe9832ac64048228d31b3825a0726eb, hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/5e6b04d28ec6af2428036942def2f402/B/0898b6b0f6fe4b4387a9c739002b19c1, hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/5e6b04d28ec6af2428036942def2f402/B/f693f1fe45384cf7a40e44b320b487d1, hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/5e6b04d28ec6af2428036942def2f402/B/988924f52f2d4b9bb7bf1392875e7daf, hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/5e6b04d28ec6af2428036942def2f402/B/870a6df549f140bf8c29855aa8ec89fa, hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/5e6b04d28ec6af2428036942def2f402/B/179e27276dfa43b9bc007104d29e8c73, hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/5e6b04d28ec6af2428036942def2f402/B/b6a68f7c4b354b43aaf979a48e13cd6c, hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/5e6b04d28ec6af2428036942def2f402/B/933988f4515349868b85d177db0515a5, 
hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/5e6b04d28ec6af2428036942def2f402/B/0233c01804df412d90ab808e61a23bcd, hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/5e6b04d28ec6af2428036942def2f402/B/4f06ac493baa4b478591d7648b0349c5, hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/5e6b04d28ec6af2428036942def2f402/B/ea153dd6f81c442bbdd80ea6388dd6a9, hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/5e6b04d28ec6af2428036942def2f402/B/4c3ba524ca894bf9b195552c06cd9802, hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/5e6b04d28ec6af2428036942def2f402/B/0b5f38f7148a41abb9d9c0626a95f882] to archive 2024-12-16T17:58:26,749 DEBUG [StoreCloser-TestAcidGuarantees,,1734371876313.5e6b04d28ec6af2428036942def2f402.-1 {}] backup.HFileArchiver(363): Archiving compacted files. 2024-12-16T17:58:26,751 DEBUG [HFileArchiver-13 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/5e6b04d28ec6af2428036942def2f402/B/2e8420cf5f2f4939a168ac84e01b764f to hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/archive/data/default/TestAcidGuarantees/5e6b04d28ec6af2428036942def2f402/B/2e8420cf5f2f4939a168ac84e01b764f 2024-12-16T17:58:26,751 DEBUG [HFileArchiver-15 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/5e6b04d28ec6af2428036942def2f402/B/cdc5532d91be4200bd2896affeee65bf to hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/archive/data/default/TestAcidGuarantees/5e6b04d28ec6af2428036942def2f402/B/cdc5532d91be4200bd2896affeee65bf 2024-12-16T17:58:26,751 DEBUG [HFileArchiver-9 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/5e6b04d28ec6af2428036942def2f402/B/60be5871b2414931a931a63f37183210 to hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/archive/data/default/TestAcidGuarantees/5e6b04d28ec6af2428036942def2f402/B/60be5871b2414931a931a63f37183210 2024-12-16T17:58:26,751 DEBUG [HFileArchiver-14 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/5e6b04d28ec6af2428036942def2f402/B/03f75cd228fd4363950b2e374c40e59b to hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/archive/data/default/TestAcidGuarantees/5e6b04d28ec6af2428036942def2f402/B/03f75cd228fd4363950b2e374c40e59b 2024-12-16T17:58:26,751 DEBUG [HFileArchiver-10 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/5e6b04d28ec6af2428036942def2f402/B/8d47a7a801f5421d8007a817bf4dae42 to hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/archive/data/default/TestAcidGuarantees/5e6b04d28ec6af2428036942def2f402/B/8d47a7a801f5421d8007a817bf4dae42 
2024-12-16T17:58:26,751 DEBUG [HFileArchiver-16 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/5e6b04d28ec6af2428036942def2f402/B/8a2baffa7ac4499bb68475f5e98c5d84 to hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/archive/data/default/TestAcidGuarantees/5e6b04d28ec6af2428036942def2f402/B/8a2baffa7ac4499bb68475f5e98c5d84 2024-12-16T17:58:26,751 DEBUG [HFileArchiver-11 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/5e6b04d28ec6af2428036942def2f402/B/71ffb2b20f344c458b9bc7222ab20c63 to hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/archive/data/default/TestAcidGuarantees/5e6b04d28ec6af2428036942def2f402/B/71ffb2b20f344c458b9bc7222ab20c63 2024-12-16T17:58:26,752 DEBUG [HFileArchiver-12 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/5e6b04d28ec6af2428036942def2f402/B/056a3964759f478f85f04a0919436ac4 to hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/archive/data/default/TestAcidGuarantees/5e6b04d28ec6af2428036942def2f402/B/056a3964759f478f85f04a0919436ac4 2024-12-16T17:58:26,752 DEBUG [HFileArchiver-13 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/5e6b04d28ec6af2428036942def2f402/B/6fe9832ac64048228d31b3825a0726eb to hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/archive/data/default/TestAcidGuarantees/5e6b04d28ec6af2428036942def2f402/B/6fe9832ac64048228d31b3825a0726eb 2024-12-16T17:58:26,752 DEBUG [HFileArchiver-15 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/5e6b04d28ec6af2428036942def2f402/B/0898b6b0f6fe4b4387a9c739002b19c1 to hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/archive/data/default/TestAcidGuarantees/5e6b04d28ec6af2428036942def2f402/B/0898b6b0f6fe4b4387a9c739002b19c1 2024-12-16T17:58:26,752 DEBUG [HFileArchiver-9 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/5e6b04d28ec6af2428036942def2f402/B/f693f1fe45384cf7a40e44b320b487d1 to hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/archive/data/default/TestAcidGuarantees/5e6b04d28ec6af2428036942def2f402/B/f693f1fe45384cf7a40e44b320b487d1 2024-12-16T17:58:26,753 DEBUG [HFileArchiver-10 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/5e6b04d28ec6af2428036942def2f402/B/870a6df549f140bf8c29855aa8ec89fa to hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/archive/data/default/TestAcidGuarantees/5e6b04d28ec6af2428036942def2f402/B/870a6df549f140bf8c29855aa8ec89fa 2024-12-16T17:58:26,753 DEBUG [HFileArchiver-14 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, 
hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/5e6b04d28ec6af2428036942def2f402/B/988924f52f2d4b9bb7bf1392875e7daf to hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/archive/data/default/TestAcidGuarantees/5e6b04d28ec6af2428036942def2f402/B/988924f52f2d4b9bb7bf1392875e7daf 2024-12-16T17:58:26,753 DEBUG [HFileArchiver-12 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/5e6b04d28ec6af2428036942def2f402/B/933988f4515349868b85d177db0515a5 to hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/archive/data/default/TestAcidGuarantees/5e6b04d28ec6af2428036942def2f402/B/933988f4515349868b85d177db0515a5 2024-12-16T17:58:26,753 DEBUG [HFileArchiver-16 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/5e6b04d28ec6af2428036942def2f402/B/179e27276dfa43b9bc007104d29e8c73 to hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/archive/data/default/TestAcidGuarantees/5e6b04d28ec6af2428036942def2f402/B/179e27276dfa43b9bc007104d29e8c73 2024-12-16T17:58:26,753 DEBUG [HFileArchiver-11 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/5e6b04d28ec6af2428036942def2f402/B/b6a68f7c4b354b43aaf979a48e13cd6c to hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/archive/data/default/TestAcidGuarantees/5e6b04d28ec6af2428036942def2f402/B/b6a68f7c4b354b43aaf979a48e13cd6c 2024-12-16T17:58:26,753 DEBUG [HFileArchiver-13 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/5e6b04d28ec6af2428036942def2f402/B/0233c01804df412d90ab808e61a23bcd to hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/archive/data/default/TestAcidGuarantees/5e6b04d28ec6af2428036942def2f402/B/0233c01804df412d90ab808e61a23bcd 2024-12-16T17:58:26,753 DEBUG [HFileArchiver-15 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/5e6b04d28ec6af2428036942def2f402/B/4f06ac493baa4b478591d7648b0349c5 to hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/archive/data/default/TestAcidGuarantees/5e6b04d28ec6af2428036942def2f402/B/4f06ac493baa4b478591d7648b0349c5 2024-12-16T17:58:26,754 DEBUG [HFileArchiver-9 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/5e6b04d28ec6af2428036942def2f402/B/ea153dd6f81c442bbdd80ea6388dd6a9 to hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/archive/data/default/TestAcidGuarantees/5e6b04d28ec6af2428036942def2f402/B/ea153dd6f81c442bbdd80ea6388dd6a9 2024-12-16T17:58:26,754 DEBUG [HFileArchiver-10 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, 
hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/5e6b04d28ec6af2428036942def2f402/B/4c3ba524ca894bf9b195552c06cd9802 to hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/archive/data/default/TestAcidGuarantees/5e6b04d28ec6af2428036942def2f402/B/4c3ba524ca894bf9b195552c06cd9802 2024-12-16T17:58:26,754 DEBUG [HFileArchiver-14 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/5e6b04d28ec6af2428036942def2f402/B/0b5f38f7148a41abb9d9c0626a95f882 to hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/archive/data/default/TestAcidGuarantees/5e6b04d28ec6af2428036942def2f402/B/0b5f38f7148a41abb9d9c0626a95f882 2024-12-16T17:58:26,755 DEBUG [StoreCloser-TestAcidGuarantees,,1734371876313.5e6b04d28ec6af2428036942def2f402.-1 {}] regionserver.HStore(2316): Moving the files [hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/5e6b04d28ec6af2428036942def2f402/C/2d7fa735d35d4245995f7015ad10ba94, hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/5e6b04d28ec6af2428036942def2f402/C/8d52d9feda8941c9874a5e7a4cc797a1, hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/5e6b04d28ec6af2428036942def2f402/C/ac3ef196633e405c95ae4d1518ac1940, hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/5e6b04d28ec6af2428036942def2f402/C/6e2bb988e30a4629a84c26f8a9086745, hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/5e6b04d28ec6af2428036942def2f402/C/095f0fffe8c643669295993ff2d2940d, hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/5e6b04d28ec6af2428036942def2f402/C/b532c1b87a8e44d98263e607d6e41058, hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/5e6b04d28ec6af2428036942def2f402/C/486e5acf997541219ae6c4984ca3607b, hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/5e6b04d28ec6af2428036942def2f402/C/a451c1cba1bc42b8ac4cac6c89fc3d91, hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/5e6b04d28ec6af2428036942def2f402/C/24805f2fc07b4eed82aa2fcd12be72bf, hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/5e6b04d28ec6af2428036942def2f402/C/c0af2f1513bf436f8ed595165039d51f, hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/5e6b04d28ec6af2428036942def2f402/C/063c2529541b4ec9b3852891d8a08f0a, hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/5e6b04d28ec6af2428036942def2f402/C/957082c3e51949f9be313487f1bd0179, hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/5e6b04d28ec6af2428036942def2f402/C/0acd56e749bc4aa0abe8516f1fe4b094, 
hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/5e6b04d28ec6af2428036942def2f402/C/31be58ec76ea4d5aaaa8bd6221c3908c, hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/5e6b04d28ec6af2428036942def2f402/C/35048e068dd345879b293cb17eae85ef, hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/5e6b04d28ec6af2428036942def2f402/C/d29adca1b2a445efaa29229d700d50f4, hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/5e6b04d28ec6af2428036942def2f402/C/735c925dbb90443c9a5ace520f9aa630, hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/5e6b04d28ec6af2428036942def2f402/C/990249a6898e40edbafdbe15e541a4b5, hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/5e6b04d28ec6af2428036942def2f402/C/dce32d1d80984287ac14792b34596bb3, hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/5e6b04d28ec6af2428036942def2f402/C/181be30d7d3f4d56931511326733cda6, hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/5e6b04d28ec6af2428036942def2f402/C/72e8c3cd31c44f6d8fdd342ceb136a5e] to archive 2024-12-16T17:58:26,755 DEBUG [StoreCloser-TestAcidGuarantees,,1734371876313.5e6b04d28ec6af2428036942def2f402.-1 {}] backup.HFileArchiver(363): Archiving compacted files. 2024-12-16T17:58:26,756 DEBUG [HFileArchiver-12 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/5e6b04d28ec6af2428036942def2f402/C/2d7fa735d35d4245995f7015ad10ba94 to hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/archive/data/default/TestAcidGuarantees/5e6b04d28ec6af2428036942def2f402/C/2d7fa735d35d4245995f7015ad10ba94 2024-12-16T17:58:26,757 DEBUG [HFileArchiver-16 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/5e6b04d28ec6af2428036942def2f402/C/8d52d9feda8941c9874a5e7a4cc797a1 to hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/archive/data/default/TestAcidGuarantees/5e6b04d28ec6af2428036942def2f402/C/8d52d9feda8941c9874a5e7a4cc797a1 2024-12-16T17:58:26,757 DEBUG [HFileArchiver-15 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/5e6b04d28ec6af2428036942def2f402/C/095f0fffe8c643669295993ff2d2940d to hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/archive/data/default/TestAcidGuarantees/5e6b04d28ec6af2428036942def2f402/C/095f0fffe8c643669295993ff2d2940d 2024-12-16T17:58:26,757 DEBUG [HFileArchiver-13 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/5e6b04d28ec6af2428036942def2f402/C/6e2bb988e30a4629a84c26f8a9086745 to 
hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/archive/data/default/TestAcidGuarantees/5e6b04d28ec6af2428036942def2f402/C/6e2bb988e30a4629a84c26f8a9086745 2024-12-16T17:58:26,757 DEBUG [HFileArchiver-9 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/5e6b04d28ec6af2428036942def2f402/C/b532c1b87a8e44d98263e607d6e41058 to hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/archive/data/default/TestAcidGuarantees/5e6b04d28ec6af2428036942def2f402/C/b532c1b87a8e44d98263e607d6e41058 2024-12-16T17:58:26,757 DEBUG [HFileArchiver-11 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/5e6b04d28ec6af2428036942def2f402/C/ac3ef196633e405c95ae4d1518ac1940 to hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/archive/data/default/TestAcidGuarantees/5e6b04d28ec6af2428036942def2f402/C/ac3ef196633e405c95ae4d1518ac1940 2024-12-16T17:58:26,757 DEBUG [HFileArchiver-10 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/5e6b04d28ec6af2428036942def2f402/C/486e5acf997541219ae6c4984ca3607b to hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/archive/data/default/TestAcidGuarantees/5e6b04d28ec6af2428036942def2f402/C/486e5acf997541219ae6c4984ca3607b 2024-12-16T17:58:26,757 DEBUG [HFileArchiver-14 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/5e6b04d28ec6af2428036942def2f402/C/a451c1cba1bc42b8ac4cac6c89fc3d91 to hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/archive/data/default/TestAcidGuarantees/5e6b04d28ec6af2428036942def2f402/C/a451c1cba1bc42b8ac4cac6c89fc3d91 2024-12-16T17:58:26,758 DEBUG [HFileArchiver-12 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/5e6b04d28ec6af2428036942def2f402/C/24805f2fc07b4eed82aa2fcd12be72bf to hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/archive/data/default/TestAcidGuarantees/5e6b04d28ec6af2428036942def2f402/C/24805f2fc07b4eed82aa2fcd12be72bf 2024-12-16T17:58:26,758 DEBUG [HFileArchiver-13 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/5e6b04d28ec6af2428036942def2f402/C/957082c3e51949f9be313487f1bd0179 to hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/archive/data/default/TestAcidGuarantees/5e6b04d28ec6af2428036942def2f402/C/957082c3e51949f9be313487f1bd0179 2024-12-16T17:58:26,758 DEBUG [HFileArchiver-16 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/5e6b04d28ec6af2428036942def2f402/C/c0af2f1513bf436f8ed595165039d51f to 
hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/archive/data/default/TestAcidGuarantees/5e6b04d28ec6af2428036942def2f402/C/c0af2f1513bf436f8ed595165039d51f 2024-12-16T17:58:26,758 DEBUG [HFileArchiver-15 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/5e6b04d28ec6af2428036942def2f402/C/063c2529541b4ec9b3852891d8a08f0a to hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/archive/data/default/TestAcidGuarantees/5e6b04d28ec6af2428036942def2f402/C/063c2529541b4ec9b3852891d8a08f0a 2024-12-16T17:58:26,758 DEBUG [HFileArchiver-9 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/5e6b04d28ec6af2428036942def2f402/C/0acd56e749bc4aa0abe8516f1fe4b094 to hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/archive/data/default/TestAcidGuarantees/5e6b04d28ec6af2428036942def2f402/C/0acd56e749bc4aa0abe8516f1fe4b094 2024-12-16T17:58:26,759 DEBUG [HFileArchiver-11 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/5e6b04d28ec6af2428036942def2f402/C/31be58ec76ea4d5aaaa8bd6221c3908c to hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/archive/data/default/TestAcidGuarantees/5e6b04d28ec6af2428036942def2f402/C/31be58ec76ea4d5aaaa8bd6221c3908c 2024-12-16T17:58:26,759 DEBUG [HFileArchiver-14 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/5e6b04d28ec6af2428036942def2f402/C/d29adca1b2a445efaa29229d700d50f4 to hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/archive/data/default/TestAcidGuarantees/5e6b04d28ec6af2428036942def2f402/C/d29adca1b2a445efaa29229d700d50f4 2024-12-16T17:58:26,759 DEBUG [HFileArchiver-12 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/5e6b04d28ec6af2428036942def2f402/C/735c925dbb90443c9a5ace520f9aa630 to hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/archive/data/default/TestAcidGuarantees/5e6b04d28ec6af2428036942def2f402/C/735c925dbb90443c9a5ace520f9aa630 2024-12-16T17:58:26,759 DEBUG [HFileArchiver-16 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/5e6b04d28ec6af2428036942def2f402/C/dce32d1d80984287ac14792b34596bb3 to hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/archive/data/default/TestAcidGuarantees/5e6b04d28ec6af2428036942def2f402/C/dce32d1d80984287ac14792b34596bb3 2024-12-16T17:58:26,759 DEBUG [HFileArchiver-13 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/5e6b04d28ec6af2428036942def2f402/C/990249a6898e40edbafdbe15e541a4b5 to 
hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/archive/data/default/TestAcidGuarantees/5e6b04d28ec6af2428036942def2f402/C/990249a6898e40edbafdbe15e541a4b5 2024-12-16T17:58:26,759 DEBUG [HFileArchiver-15 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/5e6b04d28ec6af2428036942def2f402/C/181be30d7d3f4d56931511326733cda6 to hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/archive/data/default/TestAcidGuarantees/5e6b04d28ec6af2428036942def2f402/C/181be30d7d3f4d56931511326733cda6 2024-12-16T17:58:26,759 DEBUG [HFileArchiver-9 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/5e6b04d28ec6af2428036942def2f402/C/72e8c3cd31c44f6d8fdd342ceb136a5e to hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/archive/data/default/TestAcidGuarantees/5e6b04d28ec6af2428036942def2f402/C/72e8c3cd31c44f6d8fdd342ceb136a5e 2024-12-16T17:58:26,760 DEBUG [HFileArchiver-10 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/5e6b04d28ec6af2428036942def2f402/C/35048e068dd345879b293cb17eae85ef to hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/archive/data/default/TestAcidGuarantees/5e6b04d28ec6af2428036942def2f402/C/35048e068dd345879b293cb17eae85ef 2024-12-16T17:58:26,763 DEBUG [RS_CLOSE_REGION-regionserver/3609ad07831c:0-0 {event_type=M_RS_CLOSE_REGION, pid=113}] wal.WALSplitUtil(409): Wrote file=hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/5e6b04d28ec6af2428036942def2f402/recovered.edits/323.seqid, newMaxSeqId=323, maxSeqId=4 2024-12-16T17:58:26,763 INFO [RS_CLOSE_REGION-regionserver/3609ad07831c:0-0 {event_type=M_RS_CLOSE_REGION, pid=113}] regionserver.HRegion(1922): Closed TestAcidGuarantees,,1734371876313.5e6b04d28ec6af2428036942def2f402. 
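[Editor's note] Every archive entry above follows the same convention: a store file under .../data/default/<table>/<region>/<family>/ is renamed to the mirrored location under .../archive/data/default/... in the same root. The snippet below is only a minimal sketch of that path mapping using the Hadoop FileSystem API, with paths copied from the log; the class and the helper toArchivePath are hypothetical and this is not HBase's HFileArchiver implementation, just the rename pattern the log records.

import java.net.URI;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

public class ArchivePathSketch {
  // Hypothetical helper: mirror a store-file path under <root>/archive/,
  // e.g. <root>/data/default/T/<region>/B/<file> -> <root>/archive/data/default/T/<region>/B/<file>
  static Path toArchivePath(Path root, Path storeFile) {
    String relative = storeFile.toUri().getPath()
        .substring(root.toUri().getPath().length() + 1); // strip "<root>/"
    return new Path(new Path(root, "archive"), relative);
  }

  public static void main(String[] args) throws Exception {
    Configuration conf = new Configuration();
    FileSystem fs = FileSystem.get(URI.create("hdfs://localhost:40431"), conf);
    Path root = new Path("/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4");
    Path storeFile = new Path(root,
        "data/default/TestAcidGuarantees/5e6b04d28ec6af2428036942def2f402/B/2e8420cf5f2f4939a168ac84e01b764f");
    Path archived = toArchivePath(root, storeFile);
    fs.mkdirs(archived.getParent());   // make sure the archive family directory exists
    fs.rename(storeFile, archived);    // move (not copy) the compacted store file
  }
}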
2024-12-16T17:58:26,763 DEBUG [RS_CLOSE_REGION-regionserver/3609ad07831c:0-0 {event_type=M_RS_CLOSE_REGION, pid=113}] regionserver.HRegion(1635): Region close journal for 5e6b04d28ec6af2428036942def2f402: 2024-12-16T17:58:26,765 INFO [RS_CLOSE_REGION-regionserver/3609ad07831c:0-0 {event_type=M_RS_CLOSE_REGION, pid=113}] handler.UnassignRegionHandler(170): Closed 5e6b04d28ec6af2428036942def2f402 2024-12-16T17:58:26,765 INFO [PEWorker-1 {}] assignment.RegionStateStore(202): pid=112 updating hbase:meta row=5e6b04d28ec6af2428036942def2f402, regionState=CLOSED 2024-12-16T17:58:26,767 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=113, resume processing ppid=112 2024-12-16T17:58:26,767 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=113, ppid=112, state=SUCCESS; CloseRegionProcedure 5e6b04d28ec6af2428036942def2f402, server=3609ad07831c,39733,1734371789085 in 1.8790 sec 2024-12-16T17:58:26,768 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=112, resume processing ppid=111 2024-12-16T17:58:26,768 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=112, ppid=111, state=SUCCESS; TransitRegionStateProcedure table=TestAcidGuarantees, region=5e6b04d28ec6af2428036942def2f402, UNASSIGN in 1.8850 sec 2024-12-16T17:58:26,770 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=111, resume processing ppid=110 2024-12-16T17:58:26,770 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=111, ppid=110, state=SUCCESS; CloseTableRegionsProcedure table=TestAcidGuarantees in 1.8880 sec 2024-12-16T17:58:26,770 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":1,"row":"TestAcidGuarantees","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1734371906770"}]},"ts":"1734371906770"} 2024-12-16T17:58:26,771 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(1655): Updated tableName=TestAcidGuarantees, state=DISABLED in hbase:meta 2024-12-16T17:58:26,803 INFO [PEWorker-3 {}] procedure.DisableTableProcedure(296): Set TestAcidGuarantees to state=DISABLED 2024-12-16T17:58:26,805 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=110, state=SUCCESS; DisableTableProcedure table=TestAcidGuarantees in 1.9810 sec 2024-12-16T17:58:26,933 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38367 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=110 2024-12-16T17:58:26,934 INFO [Time-limited test {}] client.HBaseAdmin$TableFuture(3751): Operation: DISABLE, Table Name: default:TestAcidGuarantees, procId: 110 completed 2024-12-16T17:58:26,935 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38367 {}] master.HMaster$5(2505): Client=jenkins//172.17.0.2 delete TestAcidGuarantees 2024-12-16T17:58:26,936 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38367 {}] procedure2.ProcedureExecutor(1098): Stored pid=114, state=RUNNABLE:DELETE_TABLE_PRE_OPERATION; DeleteTableProcedure table=TestAcidGuarantees 2024-12-16T17:58:26,938 DEBUG [PEWorker-5 {}] procedure.DeleteTableProcedure(103): Waiting for RIT for pid=114, state=RUNNABLE:DELETE_TABLE_PRE_OPERATION, locked=true; DeleteTableProcedure table=TestAcidGuarantees 2024-12-16T17:58:26,940 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38367 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=114 2024-12-16T17:58:26,940 DEBUG [PEWorker-5 {}] procedure.DeleteTableProcedure(115): Deleting regions from 
filesystem for pid=114, state=RUNNABLE:DELETE_TABLE_CLEAR_FS_LAYOUT, locked=true; DeleteTableProcedure table=TestAcidGuarantees 2024-12-16T17:58:26,944 DEBUG [HFileArchiver-11 {}] backup.HFileArchiver(133): ARCHIVING hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/5e6b04d28ec6af2428036942def2f402 2024-12-16T17:58:26,949 DEBUG [HFileArchiver-11 {}] backup.HFileArchiver(161): Archiving [FileablePath, hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/5e6b04d28ec6af2428036942def2f402/A, FileablePath, hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/5e6b04d28ec6af2428036942def2f402/B, FileablePath, hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/5e6b04d28ec6af2428036942def2f402/C, FileablePath, hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/5e6b04d28ec6af2428036942def2f402/recovered.edits] 2024-12-16T17:58:26,953 DEBUG [HFileArchiver-14 {}] backup.HFileArchiver(620): Archived from FileablePath, hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/5e6b04d28ec6af2428036942def2f402/A/9e4b7c8ccc4b4524b07001cbe6d43d9c to hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/archive/data/default/TestAcidGuarantees/5e6b04d28ec6af2428036942def2f402/A/9e4b7c8ccc4b4524b07001cbe6d43d9c 2024-12-16T17:58:26,953 DEBUG [HFileArchiver-12 {}] backup.HFileArchiver(620): Archived from FileablePath, hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/5e6b04d28ec6af2428036942def2f402/A/abee30777cc54010a7874f31940a9f29 to hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/archive/data/default/TestAcidGuarantees/5e6b04d28ec6af2428036942def2f402/A/abee30777cc54010a7874f31940a9f29 2024-12-16T17:58:26,956 DEBUG [HFileArchiver-16 {}] backup.HFileArchiver(620): Archived from FileablePath, hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/5e6b04d28ec6af2428036942def2f402/B/2fb56c37a174425fb2d6bb46ce28425b to hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/archive/data/default/TestAcidGuarantees/5e6b04d28ec6af2428036942def2f402/B/2fb56c37a174425fb2d6bb46ce28425b 2024-12-16T17:58:26,956 DEBUG [HFileArchiver-13 {}] backup.HFileArchiver(620): Archived from FileablePath, hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/5e6b04d28ec6af2428036942def2f402/B/577c3a8c311b401b9cbb6a7b964b9f59 to hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/archive/data/default/TestAcidGuarantees/5e6b04d28ec6af2428036942def2f402/B/577c3a8c311b401b9cbb6a7b964b9f59 2024-12-16T17:58:26,958 DEBUG [HFileArchiver-15 {}] backup.HFileArchiver(620): Archived from FileablePath, hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/5e6b04d28ec6af2428036942def2f402/C/6137baad36144928acffc4aaac259105 to hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/archive/data/default/TestAcidGuarantees/5e6b04d28ec6af2428036942def2f402/C/6137baad36144928acffc4aaac259105 
2024-12-16T17:58:26,958 DEBUG [HFileArchiver-9 {}] backup.HFileArchiver(620): Archived from FileablePath, hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/5e6b04d28ec6af2428036942def2f402/C/e05163d8e8d84410a5bb7c6fe601feba to hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/archive/data/default/TestAcidGuarantees/5e6b04d28ec6af2428036942def2f402/C/e05163d8e8d84410a5bb7c6fe601feba 2024-12-16T17:58:26,960 DEBUG [HFileArchiver-10 {}] backup.HFileArchiver(620): Archived from FileablePath, hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/5e6b04d28ec6af2428036942def2f402/recovered.edits/323.seqid to hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/archive/data/default/TestAcidGuarantees/5e6b04d28ec6af2428036942def2f402/recovered.edits/323.seqid 2024-12-16T17:58:26,961 DEBUG [HFileArchiver-11 {}] backup.HFileArchiver(634): Deleted hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/5e6b04d28ec6af2428036942def2f402 2024-12-16T17:58:26,961 DEBUG [PEWorker-5 {}] procedure.DeleteTableProcedure(313): Archived TestAcidGuarantees regions 2024-12-16T17:58:26,961 DEBUG [PEWorker-5 {}] backup.HFileArchiver(133): ARCHIVING hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3 2024-12-16T17:58:26,962 DEBUG [PEWorker-5 {}] backup.HFileArchiver(161): Archiving [FileablePath, hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A] 2024-12-16T17:58:26,966 DEBUG [HFileArchiver-15 {}] backup.HFileArchiver(620): Archived from FileablePath, hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e2024121636ef38f3952340c3a69828f6f47b27b5_5e6b04d28ec6af2428036942def2f402 to hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e2024121636ef38f3952340c3a69828f6f47b27b5_5e6b04d28ec6af2428036942def2f402 2024-12-16T17:58:26,967 DEBUG [HFileArchiver-16 {}] backup.HFileArchiver(620): Archived from FileablePath, hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e2024121616b21471bbcc45d4b77ed6486b9d8e64_5e6b04d28ec6af2428036942def2f402 to hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e2024121616b21471bbcc45d4b77ed6486b9d8e64_5e6b04d28ec6af2428036942def2f402 2024-12-16T17:58:26,967 DEBUG [HFileArchiver-12 {}] backup.HFileArchiver(620): Archived from FileablePath, hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202412160f7ab92de5e34392953c4fdba925b934_5e6b04d28ec6af2428036942def2f402 to 
hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202412160f7ab92de5e34392953c4fdba925b934_5e6b04d28ec6af2428036942def2f402 2024-12-16T17:58:26,967 DEBUG [HFileArchiver-11 {}] backup.HFileArchiver(620): Archived from FileablePath, hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202412168cb75d6985444a4e849602bab1985ff0_5e6b04d28ec6af2428036942def2f402 to hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202412168cb75d6985444a4e849602bab1985ff0_5e6b04d28ec6af2428036942def2f402 2024-12-16T17:58:26,967 DEBUG [HFileArchiver-13 {}] backup.HFileArchiver(620): Archived from FileablePath, hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e2024121634360ab573d049ec990520c57cb1583d_5e6b04d28ec6af2428036942def2f402 to hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e2024121634360ab573d049ec990520c57cb1583d_5e6b04d28ec6af2428036942def2f402 2024-12-16T17:58:26,967 DEBUG [HFileArchiver-9 {}] backup.HFileArchiver(620): Archived from FileablePath, hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202412167512855679534aa9b90ec84cce405956_5e6b04d28ec6af2428036942def2f402 to hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202412167512855679534aa9b90ec84cce405956_5e6b04d28ec6af2428036942def2f402 2024-12-16T17:58:26,967 DEBUG [HFileArchiver-14 {}] backup.HFileArchiver(620): Archived from FileablePath, hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202412160877023579804a7b8c88b3d6d5f95d7f_5e6b04d28ec6af2428036942def2f402 to hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202412160877023579804a7b8c88b3d6d5f95d7f_5e6b04d28ec6af2428036942def2f402 2024-12-16T17:58:26,967 DEBUG [HFileArchiver-10 {}] backup.HFileArchiver(620): Archived from FileablePath, hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e2024121686daacd76f69416d9a94f424c18f4c77_5e6b04d28ec6af2428036942def2f402 to hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e2024121686daacd76f69416d9a94f424c18f4c77_5e6b04d28ec6af2428036942def2f402 2024-12-16T17:58:26,968 DEBUG [HFileArchiver-15 {}] backup.HFileArchiver(620): Archived from FileablePath, 
hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e2024121699add50dc6ef49fa9a7af6291b6f467b_5e6b04d28ec6af2428036942def2f402 to hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e2024121699add50dc6ef49fa9a7af6291b6f467b_5e6b04d28ec6af2428036942def2f402 2024-12-16T17:58:26,968 DEBUG [HFileArchiver-12 {}] backup.HFileArchiver(620): Archived from FileablePath, hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241216a22712a1c2da43d6afc255f4512ad2a0_5e6b04d28ec6af2428036942def2f402 to hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241216a22712a1c2da43d6afc255f4512ad2a0_5e6b04d28ec6af2428036942def2f402 2024-12-16T17:58:26,968 DEBUG [HFileArchiver-16 {}] backup.HFileArchiver(620): Archived from FileablePath, hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241216a01d3f929dc2480eac10ab8af18a8c5f_5e6b04d28ec6af2428036942def2f402 to hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241216a01d3f929dc2480eac10ab8af18a8c5f_5e6b04d28ec6af2428036942def2f402 2024-12-16T17:58:26,968 DEBUG [HFileArchiver-13 {}] backup.HFileArchiver(620): Archived from FileablePath, hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241216b3eef26c55514441b8715a4c307bcff9_5e6b04d28ec6af2428036942def2f402 to hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241216b3eef26c55514441b8715a4c307bcff9_5e6b04d28ec6af2428036942def2f402 2024-12-16T17:58:26,968 DEBUG [HFileArchiver-11 {}] backup.HFileArchiver(620): Archived from FileablePath, hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241216a33c2cae0a9241ed8c183e17cb2476b7_5e6b04d28ec6af2428036942def2f402 to hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241216a33c2cae0a9241ed8c183e17cb2476b7_5e6b04d28ec6af2428036942def2f402 2024-12-16T17:58:26,968 DEBUG [HFileArchiver-9 {}] backup.HFileArchiver(620): Archived from FileablePath, hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241216b8da11a678514ad390cf62eb042afe11_5e6b04d28ec6af2428036942def2f402 to 
hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241216b8da11a678514ad390cf62eb042afe11_5e6b04d28ec6af2428036942def2f402 2024-12-16T17:58:26,968 DEBUG [HFileArchiver-14 {}] backup.HFileArchiver(620): Archived from FileablePath, hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241216c3ed0994eda04006a45121d220f3866a_5e6b04d28ec6af2428036942def2f402 to hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241216c3ed0994eda04006a45121d220f3866a_5e6b04d28ec6af2428036942def2f402 2024-12-16T17:58:26,968 DEBUG [HFileArchiver-10 {}] backup.HFileArchiver(620): Archived from FileablePath, hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241216d79fc3e569b74cd88adc6775b9bcb4be_5e6b04d28ec6af2428036942def2f402 to hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241216d79fc3e569b74cd88adc6775b9bcb4be_5e6b04d28ec6af2428036942def2f402 2024-12-16T17:58:26,968 DEBUG [HFileArchiver-15 {}] backup.HFileArchiver(620): Archived from FileablePath, hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241216efcbed4337fd488c9ca7a6af6fb5efef_5e6b04d28ec6af2428036942def2f402 to hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241216efcbed4337fd488c9ca7a6af6fb5efef_5e6b04d28ec6af2428036942def2f402 2024-12-16T17:58:26,969 DEBUG [PEWorker-5 {}] backup.HFileArchiver(634): Deleted hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3 2024-12-16T17:58:26,971 DEBUG [PEWorker-5 {}] procedure.DeleteTableProcedure(120): Deleting regions from META for pid=114, state=RUNNABLE:DELETE_TABLE_REMOVE_FROM_META, locked=true; DeleteTableProcedure table=TestAcidGuarantees 2024-12-16T17:58:26,972 WARN [PEWorker-5 {}] procedure.DeleteTableProcedure(371): Deleting some vestigial 1 rows of TestAcidGuarantees from hbase:meta 2024-12-16T17:58:26,974 DEBUG [PEWorker-5 {}] procedure.DeleteTableProcedure(408): Removing 'TestAcidGuarantees' descriptor. 2024-12-16T17:58:26,974 DEBUG [PEWorker-5 {}] procedure.DeleteTableProcedure(126): Deleting assignment state for pid=114, state=RUNNABLE:DELETE_TABLE_UNASSIGN_REGIONS, locked=true; DeleteTableProcedure table=TestAcidGuarantees 2024-12-16T17:58:26,975 DEBUG [PEWorker-5 {}] procedure.DeleteTableProcedure(398): Removing 'TestAcidGuarantees' from region states. 
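[Editor's note] At this point the store files, mob files, and the recovered.edits marker of region 5e6b04d28ec6af2428036942def2f402 have all been moved under the archive/ tree and the live region directory has been deleted. A read-only sketch for inspecting what the archived region now contains, assuming the same HDFS endpoint and root path shown in the log:

import java.net.URI;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileStatus;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

public class ListArchivedRegionSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = new Configuration();
    FileSystem fs = FileSystem.get(URI.create("hdfs://localhost:40431"), conf);
    Path archivedRegion = new Path(
        "/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4"
        + "/archive/data/default/TestAcidGuarantees/5e6b04d28ec6af2428036942def2f402");
    for (FileStatus family : fs.listStatus(archivedRegion)) {     // A, B, C, recovered.edits
      for (FileStatus file : fs.listStatus(family.getPath())) {   // archived store files
        System.out.println(family.getPath().getName() + "/"
            + file.getPath().getName() + "  " + file.getLen() + " bytes");
      }
    }
  }
}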
2024-12-16T17:58:26,975 DEBUG [PEWorker-5 {}] hbase.MetaTableAccessor(2113): Delete {"totalColumns":1,"row":"TestAcidGuarantees,,1734371876313.5e6b04d28ec6af2428036942def2f402.","families":{"info":[{"qualifier":"","vlen":0,"tag":[],"timestamp":"1734371906975"}]},"ts":"9223372036854775807"} 2024-12-16T17:58:26,976 INFO [PEWorker-5 {}] hbase.MetaTableAccessor(1808): Deleted 1 regions from META 2024-12-16T17:58:26,976 DEBUG [PEWorker-5 {}] hbase.MetaTableAccessor(1809): Deleted regions: [{ENCODED => 5e6b04d28ec6af2428036942def2f402, NAME => 'TestAcidGuarantees,,1734371876313.5e6b04d28ec6af2428036942def2f402.', STARTKEY => '', ENDKEY => ''}] 2024-12-16T17:58:26,976 DEBUG [PEWorker-5 {}] procedure.DeleteTableProcedure(402): Marking 'TestAcidGuarantees' as deleted. 2024-12-16T17:58:26,976 DEBUG [PEWorker-5 {}] hbase.MetaTableAccessor(2113): Delete {"totalColumns":1,"row":"TestAcidGuarantees","families":{"table":[{"qualifier":"state","vlen":0,"tag":[],"timestamp":"1734371906976"}]},"ts":"9223372036854775807"} 2024-12-16T17:58:26,978 INFO [PEWorker-5 {}] hbase.MetaTableAccessor(1678): Deleted table TestAcidGuarantees state from META 2024-12-16T17:58:26,987 DEBUG [PEWorker-5 {}] procedure.DeleteTableProcedure(133): Finished pid=114, state=RUNNABLE:DELETE_TABLE_POST_OPERATION, locked=true; DeleteTableProcedure table=TestAcidGuarantees 2024-12-16T17:58:26,988 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=114, state=SUCCESS; DeleteTableProcedure table=TestAcidGuarantees in 52 msec 2024-12-16T17:58:27,041 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38367 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=114 2024-12-16T17:58:27,041 INFO [Time-limited test {}] client.HBaseAdmin$TableFuture(3751): Operation: DELETE, Table Name: default:TestAcidGuarantees, procId: 114 completed 2024-12-16T17:58:27,055 INFO [Time-limited test {}] hbase.ResourceChecker(175): after: TestAcidGuaranteesWithAdaptivePolicy#testMobScanAtomicity Thread=247 (was 245) - Thread LEAK? -, OpenFileDescriptor=459 (was 452) - OpenFileDescriptor LEAK? -, MaxFileDescriptor=1048576 (was 1048576), SystemLoadAverage=453 (was 539), ProcessCount=11 (was 11), AvailableMemoryMB=3099 (was 3191) 2024-12-16T17:58:27,066 INFO [Time-limited test {}] hbase.ResourceChecker(147): before: TestAcidGuaranteesWithAdaptivePolicy#testScanAtomicity Thread=247, OpenFileDescriptor=459, MaxFileDescriptor=1048576, SystemLoadAverage=453, ProcessCount=11, AvailableMemoryMB=3099 2024-12-16T17:58:27,068 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38367 {}] util.TableDescriptorChecker(321): MEMSTORE_FLUSHSIZE for table descriptor or "hbase.hregion.memstore.flush.size" (131072) is too small, which might cause very frequent flushing. 
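[Editor's note] The DISABLE and DELETE operations completed above (procId 110 and 114) correspond to two Admin calls on the client side. The test harness drives them through its own utilities; the standalone program below is only an equivalent client-side sketch, with the table name taken from the log.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

public class DropTableSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    try (Connection conn = ConnectionFactory.createConnection(conf);
         Admin admin = conn.getAdmin()) {
      TableName table = TableName.valueOf("TestAcidGuarantees");
      if (admin.isTableEnabled(table)) {
        admin.disableTable(table);   // drives a DisableTableProcedure like pid=110 above
      }
      admin.deleteTable(table);      // drives a DeleteTableProcedure (archive + META cleanup) like pid=114
    }
  }
}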
2024-12-16T17:58:27,068 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38367 {}] master.HMaster$4(2389): Client=jenkins//172.17.0.2 create 'TestAcidGuarantees', {TABLE_ATTRIBUTES => {METADATA => {'hbase.hregion.compacting.memstore.type' => 'ADAPTIVE'}}}, {NAME => 'A', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'B', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'C', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-12-16T17:58:27,069 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38367 {}] procedure2.ProcedureExecutor(1098): Stored pid=115, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION; CreateTableProcedure table=TestAcidGuarantees 2024-12-16T17:58:27,070 INFO [PEWorker-1 {}] procedure.CreateTableProcedure(89): pid=115, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, locked=true; CreateTableProcedure table=TestAcidGuarantees execute state=CREATE_TABLE_PRE_OPERATION 2024-12-16T17:58:27,070 DEBUG [PEWorker-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-16T17:58:27,070 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38367 {}] master.MasterRpcServices(713): Client=jenkins//172.17.0.2 procedure request for creating table: namespace: "default" qualifier: "TestAcidGuarantees" procId is: 115 2024-12-16T17:58:27,071 INFO [PEWorker-1 {}] procedure.CreateTableProcedure(89): pid=115, state=RUNNABLE:CREATE_TABLE_WRITE_FS_LAYOUT, locked=true; CreateTableProcedure table=TestAcidGuarantees execute state=CREATE_TABLE_WRITE_FS_LAYOUT 2024-12-16T17:58:27,073 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38367 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=115 2024-12-16T17:58:27,078 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41817 is added to blk_1073742275_1451 (size=963) 2024-12-16T17:58:27,175 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38367 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=115 2024-12-16T17:58:27,377 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38367 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=115 2024-12-16T17:58:27,483 INFO [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(7106): creating {ENCODED => f14d455ff3b60546f0a651dc8cf12d5c, NAME => 'TestAcidGuarantees,,1734371907068.f14d455ff3b60546f0a651dc8cf12d5c.', STARTKEY => '', ENDKEY => ''}, tableDescriptor='TestAcidGuarantees', {TABLE_ATTRIBUTES => {METADATA => {'hbase.hregion.compacting.memstore.type' => 'ADAPTIVE', 'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'A', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => 
'1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'B', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'C', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, regionDir=hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4 2024-12-16T17:58:27,491 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41817 is added to blk_1073742276_1452 (size=53) 2024-12-16T17:58:27,535 DEBUG [FsDatasetAsyncDiskServiceFixer {}] hbase.HBaseTestingUtility$FsDatasetAsyncDiskServiceFixer(620): NoSuchFieldException: threadGroup; It might because your Hadoop version > 3.2.3 or 3.3.4, See HBASE-27595 for details. 2024-12-16T17:58:27,679 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38367 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=115 2024-12-16T17:58:27,893 DEBUG [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(894): Instantiated TestAcidGuarantees,,1734371907068.f14d455ff3b60546f0a651dc8cf12d5c.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-16T17:58:27,893 DEBUG [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(1681): Closing f14d455ff3b60546f0a651dc8cf12d5c, disabling compactions & flushes 2024-12-16T17:58:27,893 INFO [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(1703): Closing region TestAcidGuarantees,,1734371907068.f14d455ff3b60546f0a651dc8cf12d5c. 2024-12-16T17:58:27,893 DEBUG [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(1724): Waiting without time limit for close lock on TestAcidGuarantees,,1734371907068.f14d455ff3b60546f0a651dc8cf12d5c. 2024-12-16T17:58:27,893 DEBUG [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(1791): Acquired close lock on TestAcidGuarantees,,1734371907068.f14d455ff3b60546f0a651dc8cf12d5c. after waiting 0 ms 2024-12-16T17:58:27,893 DEBUG [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(1801): Updates disabled for region TestAcidGuarantees,,1734371907068.f14d455ff3b60546f0a651dc8cf12d5c. 2024-12-16T17:58:27,893 INFO [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(1922): Closed TestAcidGuarantees,,1734371907068.f14d455ff3b60546f0a651dc8cf12d5c. 
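[Editor's note] The CREATE request above sets the table-level attribute 'hbase.hregion.compacting.memstore.type' => 'ADAPTIVE' and three identical column families A, B and C with VERSIONS => '1'. A hedged client-side sketch of building and submitting an equivalent descriptor; only the attributes visible in the log are set explicitly, everything else is left at defaults, and the class name is hypothetical.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.TableDescriptor;
import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
import org.apache.hadoop.hbase.util.Bytes;

public class CreateAdaptiveTableSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    try (Connection conn = ConnectionFactory.createConnection(conf);
         Admin admin = conn.getAdmin()) {
      TableDescriptorBuilder builder =
          TableDescriptorBuilder.newBuilder(TableName.valueOf("TestAcidGuarantees"))
              // table-level metadata key that selects the ADAPTIVE compacting memstore
              .setValue("hbase.hregion.compacting.memstore.type", "ADAPTIVE");
      for (String family : new String[] { "A", "B", "C" }) {
        builder.setColumnFamily(
            ColumnFamilyDescriptorBuilder.newBuilder(Bytes.toBytes(family))
                .setMaxVersions(1)     // VERSIONS => '1' as in the logged descriptor
                .build());
      }
      TableDescriptor td = builder.build();
      admin.createTable(td);           // drives a CreateTableProcedure like pid=115 above
    }
  }
}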
2024-12-16T17:58:27,893 DEBUG [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(1635): Region close journal for f14d455ff3b60546f0a651dc8cf12d5c: 2024-12-16T17:58:27,894 INFO [PEWorker-1 {}] procedure.CreateTableProcedure(89): pid=115, state=RUNNABLE:CREATE_TABLE_ADD_TO_META, locked=true; CreateTableProcedure table=TestAcidGuarantees execute state=CREATE_TABLE_ADD_TO_META 2024-12-16T17:58:27,895 DEBUG [PEWorker-1 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":2,"row":"TestAcidGuarantees,,1734371907068.f14d455ff3b60546f0a651dc8cf12d5c.","families":{"info":[{"qualifier":"regioninfo","vlen":52,"tag":[],"timestamp":"1734371907894"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1734371907894"}]},"ts":"1734371907894"} 2024-12-16T17:58:27,896 INFO [PEWorker-1 {}] hbase.MetaTableAccessor(1516): Added 1 regions to meta. 2024-12-16T17:58:27,897 INFO [PEWorker-1 {}] procedure.CreateTableProcedure(89): pid=115, state=RUNNABLE:CREATE_TABLE_ASSIGN_REGIONS, locked=true; CreateTableProcedure table=TestAcidGuarantees execute state=CREATE_TABLE_ASSIGN_REGIONS 2024-12-16T17:58:27,897 DEBUG [PEWorker-1 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":1,"row":"TestAcidGuarantees","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1734371907897"}]},"ts":"1734371907897"} 2024-12-16T17:58:27,899 INFO [PEWorker-1 {}] hbase.MetaTableAccessor(1655): Updated tableName=TestAcidGuarantees, state=ENABLING in hbase:meta 2024-12-16T17:58:27,979 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=116, ppid=115, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE; TransitRegionStateProcedure table=TestAcidGuarantees, region=f14d455ff3b60546f0a651dc8cf12d5c, ASSIGN}] 2024-12-16T17:58:27,981 INFO [PEWorker-4 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=116, ppid=115, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE; TransitRegionStateProcedure table=TestAcidGuarantees, region=f14d455ff3b60546f0a651dc8cf12d5c, ASSIGN 2024-12-16T17:58:27,982 INFO [PEWorker-4 {}] assignment.TransitRegionStateProcedure(264): Starting pid=116, ppid=115, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, locked=true; TransitRegionStateProcedure table=TestAcidGuarantees, region=f14d455ff3b60546f0a651dc8cf12d5c, ASSIGN; state=OFFLINE, location=3609ad07831c,39733,1734371789085; forceNewPlan=false, retain=false 2024-12-16T17:58:28,133 INFO [PEWorker-2 {}] assignment.RegionStateStore(202): pid=116 updating hbase:meta row=f14d455ff3b60546f0a651dc8cf12d5c, regionState=OPENING, regionLocation=3609ad07831c,39733,1734371789085 2024-12-16T17:58:28,134 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=117, ppid=116, state=RUNNABLE; OpenRegionProcedure f14d455ff3b60546f0a651dc8cf12d5c, server=3609ad07831c,39733,1734371789085}] 2024-12-16T17:58:28,181 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38367 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=115 2024-12-16T17:58:28,286 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 3609ad07831c,39733,1734371789085 2024-12-16T17:58:28,292 INFO [RS_OPEN_REGION-regionserver/3609ad07831c:0-0 {event_type=M_RS_OPEN_REGION, pid=117}] handler.AssignRegionHandler(135): Open TestAcidGuarantees,,1734371907068.f14d455ff3b60546f0a651dc8cf12d5c. 
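[Editor's note] While the master assigns and opens the new region, the client keeps polling "Checking to see if procedure is done pid=115". Admin.createTable normally blocks until this completes; the loop below is just an explicit, hedged sketch of such a client-side wait for the table to come online.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

public class WaitForTableSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    try (Connection conn = ConnectionFactory.createConnection(conf);
         Admin admin = conn.getAdmin()) {
      TableName table = TableName.valueOf("TestAcidGuarantees");
      // Poll until every region of the new table is assigned and open.
      while (!admin.isTableAvailable(table)) {
        Thread.sleep(100);
      }
      System.out.println("Table " + table + " is online");
    }
  }
}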
2024-12-16T17:58:28,293 DEBUG [RS_OPEN_REGION-regionserver/3609ad07831c:0-0 {event_type=M_RS_OPEN_REGION, pid=117}] regionserver.HRegion(7285): Opening region: {ENCODED => f14d455ff3b60546f0a651dc8cf12d5c, NAME => 'TestAcidGuarantees,,1734371907068.f14d455ff3b60546f0a651dc8cf12d5c.', STARTKEY => '', ENDKEY => ''} 2024-12-16T17:58:28,294 DEBUG [RS_OPEN_REGION-regionserver/3609ad07831c:0-0 {event_type=M_RS_OPEN_REGION, pid=117}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table TestAcidGuarantees f14d455ff3b60546f0a651dc8cf12d5c 2024-12-16T17:58:28,294 DEBUG [RS_OPEN_REGION-regionserver/3609ad07831c:0-0 {event_type=M_RS_OPEN_REGION, pid=117}] regionserver.HRegion(894): Instantiated TestAcidGuarantees,,1734371907068.f14d455ff3b60546f0a651dc8cf12d5c.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-16T17:58:28,294 DEBUG [RS_OPEN_REGION-regionserver/3609ad07831c:0-0 {event_type=M_RS_OPEN_REGION, pid=117}] regionserver.HRegion(7327): checking encryption for f14d455ff3b60546f0a651dc8cf12d5c 2024-12-16T17:58:28,294 DEBUG [RS_OPEN_REGION-regionserver/3609ad07831c:0-0 {event_type=M_RS_OPEN_REGION, pid=117}] regionserver.HRegion(7330): checking classloading for f14d455ff3b60546f0a651dc8cf12d5c 2024-12-16T17:58:28,297 INFO [StoreOpener-f14d455ff3b60546f0a651dc8cf12d5c-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family A of region f14d455ff3b60546f0a651dc8cf12d5c 2024-12-16T17:58:28,298 INFO [StoreOpener-f14d455ff3b60546f0a651dc8cf12d5c-1 {}] regionserver.CompactingMemStore(122): Store=A, in-memory flush size threshold=2.00 MB, immutable segments index type=CHUNK_MAP, compactor=ADAPTIVE, pipelineThreshold=2, compactionCellMax=10 2024-12-16T17:58:28,299 INFO [StoreOpener-f14d455ff3b60546f0a651dc8cf12d5c-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region f14d455ff3b60546f0a651dc8cf12d5c columnFamilyName A 2024-12-16T17:58:28,299 DEBUG [StoreOpener-f14d455ff3b60546f0a651dc8cf12d5c-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-16T17:58:28,299 INFO [StoreOpener-f14d455ff3b60546f0a651dc8cf12d5c-1 {}] regionserver.HStore(327): Store=f14d455ff3b60546f0a651dc8cf12d5c/A, memstore type=CompactingMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-16T17:58:28,299 INFO [StoreOpener-f14d455ff3b60546f0a651dc8cf12d5c-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, 
cacheDataCompressed=false, prefetchOnOpen=false, for column family B of region f14d455ff3b60546f0a651dc8cf12d5c 2024-12-16T17:58:28,301 INFO [StoreOpener-f14d455ff3b60546f0a651dc8cf12d5c-1 {}] regionserver.CompactingMemStore(122): Store=B, in-memory flush size threshold=2.00 MB, immutable segments index type=CHUNK_MAP, compactor=ADAPTIVE, pipelineThreshold=2, compactionCellMax=10 2024-12-16T17:58:28,301 INFO [StoreOpener-f14d455ff3b60546f0a651dc8cf12d5c-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region f14d455ff3b60546f0a651dc8cf12d5c columnFamilyName B 2024-12-16T17:58:28,301 DEBUG [StoreOpener-f14d455ff3b60546f0a651dc8cf12d5c-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-16T17:58:28,301 INFO [StoreOpener-f14d455ff3b60546f0a651dc8cf12d5c-1 {}] regionserver.HStore(327): Store=f14d455ff3b60546f0a651dc8cf12d5c/B, memstore type=CompactingMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-16T17:58:28,302 INFO [StoreOpener-f14d455ff3b60546f0a651dc8cf12d5c-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family C of region f14d455ff3b60546f0a651dc8cf12d5c 2024-12-16T17:58:28,303 INFO [StoreOpener-f14d455ff3b60546f0a651dc8cf12d5c-1 {}] regionserver.CompactingMemStore(122): Store=C, in-memory flush size threshold=2.00 MB, immutable segments index type=CHUNK_MAP, compactor=ADAPTIVE, pipelineThreshold=2, compactionCellMax=10 2024-12-16T17:58:28,303 INFO [StoreOpener-f14d455ff3b60546f0a651dc8cf12d5c-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region f14d455ff3b60546f0a651dc8cf12d5c columnFamilyName C 2024-12-16T17:58:28,303 DEBUG [StoreOpener-f14d455ff3b60546f0a651dc8cf12d5c-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-16T17:58:28,303 INFO [StoreOpener-f14d455ff3b60546f0a651dc8cf12d5c-1 {}] regionserver.HStore(327): Store=f14d455ff3b60546f0a651dc8cf12d5c/C, memstore 
type=CompactingMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-16T17:58:28,304 INFO [RS_OPEN_REGION-regionserver/3609ad07831c:0-0 {event_type=M_RS_OPEN_REGION, pid=117}] regionserver.HRegion(1178): Setting FlushNonSloppyStoresFirstPolicy for the region=TestAcidGuarantees,,1734371907068.f14d455ff3b60546f0a651dc8cf12d5c. 2024-12-16T17:58:28,304 DEBUG [RS_OPEN_REGION-regionserver/3609ad07831c:0-0 {event_type=M_RS_OPEN_REGION, pid=117}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/f14d455ff3b60546f0a651dc8cf12d5c 2024-12-16T17:58:28,305 DEBUG [RS_OPEN_REGION-regionserver/3609ad07831c:0-0 {event_type=M_RS_OPEN_REGION, pid=117}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/f14d455ff3b60546f0a651dc8cf12d5c 2024-12-16T17:58:28,306 DEBUG [RS_OPEN_REGION-regionserver/3609ad07831c:0-0 {event_type=M_RS_OPEN_REGION, pid=117}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table TestAcidGuarantees descriptor;using region.getMemStoreFlushHeapSize/# of families (16.0 M)) instead. 2024-12-16T17:58:28,308 DEBUG [RS_OPEN_REGION-regionserver/3609ad07831c:0-0 {event_type=M_RS_OPEN_REGION, pid=117}] regionserver.HRegion(1085): writing seq id for f14d455ff3b60546f0a651dc8cf12d5c 2024-12-16T17:58:28,310 DEBUG [RS_OPEN_REGION-regionserver/3609ad07831c:0-0 {event_type=M_RS_OPEN_REGION, pid=117}] wal.WALSplitUtil(409): Wrote file=hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/f14d455ff3b60546f0a651dc8cf12d5c/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-12-16T17:58:28,310 INFO [RS_OPEN_REGION-regionserver/3609ad07831c:0-0 {event_type=M_RS_OPEN_REGION, pid=117}] regionserver.HRegion(1102): Opened f14d455ff3b60546f0a651dc8cf12d5c; next sequenceid=2; ConstantSizeRegionSplitPolicy{desiredMaxFileSize=69986645, jitterRate=0.0428822785615921}, FlushLargeStoresPolicy{flushSizeLowerBound=16777216} 2024-12-16T17:58:28,311 DEBUG [RS_OPEN_REGION-regionserver/3609ad07831c:0-0 {event_type=M_RS_OPEN_REGION, pid=117}] regionserver.HRegion(1001): Region open journal for f14d455ff3b60546f0a651dc8cf12d5c: 2024-12-16T17:58:28,312 INFO [RS_OPEN_REGION-regionserver/3609ad07831c:0-0 {event_type=M_RS_OPEN_REGION, pid=117}] regionserver.HRegionServer(2601): Post open deploy tasks for TestAcidGuarantees,,1734371907068.f14d455ff3b60546f0a651dc8cf12d5c., pid=117, masterSystemTime=1734371908286 2024-12-16T17:58:28,313 DEBUG [RS_OPEN_REGION-regionserver/3609ad07831c:0-0 {event_type=M_RS_OPEN_REGION, pid=117}] regionserver.HRegionServer(2628): Finished post open deploy task for TestAcidGuarantees,,1734371907068.f14d455ff3b60546f0a651dc8cf12d5c. 2024-12-16T17:58:28,313 INFO [RS_OPEN_REGION-regionserver/3609ad07831c:0-0 {event_type=M_RS_OPEN_REGION, pid=117}] handler.AssignRegionHandler(164): Opened TestAcidGuarantees,,1734371907068.f14d455ff3b60546f0a651dc8cf12d5c. 
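Each store opened above is backed by a CompactingMemStore ("memstore type=CompactingMemStore ... compactor=ADAPTIVE", in-memory flush threshold 2.00 MB). As a hedged sketch only: one way to request that memstore type per column family in HBase 2.x is via the descriptor builder shown below; the test may instead be enabling it globally through the hbase.hregion.compacting.memstore.type site configuration, so treat this as an illustration of the setting, not the test's actual code.

    import org.apache.hadoop.hbase.MemoryCompactionPolicy;
    import org.apache.hadoop.hbase.client.ColumnFamilyDescriptor;
    import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
    import org.apache.hadoop.hbase.util.Bytes;

    public class AdaptiveMemStoreFamily {
        // Builds a column family descriptor whose memstore uses in-memory compaction,
        // the configuration that appears above as "memstore type=CompactingMemStore,
        // ... compactor=ADAPTIVE".
        static ColumnFamilyDescriptor adaptiveFamily(String name) {
            return ColumnFamilyDescriptorBuilder.newBuilder(Bytes.toBytes(name))
                .setInMemoryCompaction(MemoryCompactionPolicy.ADAPTIVE)
                .build();
        }

        public static void main(String[] args) {
            System.out.println(adaptiveFamily("A"));
        }
    }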
2024-12-16T17:58:28,314 INFO [PEWorker-5 {}] assignment.RegionStateStore(202): pid=116 updating hbase:meta row=f14d455ff3b60546f0a651dc8cf12d5c, regionState=OPEN, openSeqNum=2, regionLocation=3609ad07831c,39733,1734371789085 2024-12-16T17:58:28,317 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=117, resume processing ppid=116 2024-12-16T17:58:28,317 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=117, ppid=116, state=SUCCESS; OpenRegionProcedure f14d455ff3b60546f0a651dc8cf12d5c, server=3609ad07831c,39733,1734371789085 in 181 msec 2024-12-16T17:58:28,318 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=116, resume processing ppid=115 2024-12-16T17:58:28,318 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=116, ppid=115, state=SUCCESS; TransitRegionStateProcedure table=TestAcidGuarantees, region=f14d455ff3b60546f0a651dc8cf12d5c, ASSIGN in 338 msec 2024-12-16T17:58:28,318 INFO [PEWorker-4 {}] procedure.CreateTableProcedure(89): pid=115, state=RUNNABLE:CREATE_TABLE_UPDATE_DESC_CACHE, locked=true; CreateTableProcedure table=TestAcidGuarantees execute state=CREATE_TABLE_UPDATE_DESC_CACHE 2024-12-16T17:58:28,318 DEBUG [PEWorker-4 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":1,"row":"TestAcidGuarantees","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1734371908318"}]},"ts":"1734371908318"} 2024-12-16T17:58:28,319 INFO [PEWorker-4 {}] hbase.MetaTableAccessor(1655): Updated tableName=TestAcidGuarantees, state=ENABLED in hbase:meta 2024-12-16T17:58:28,362 INFO [PEWorker-4 {}] procedure.CreateTableProcedure(89): pid=115, state=RUNNABLE:CREATE_TABLE_POST_OPERATION, locked=true; CreateTableProcedure table=TestAcidGuarantees execute state=CREATE_TABLE_POST_OPERATION 2024-12-16T17:58:28,363 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=115, state=SUCCESS; CreateTableProcedure table=TestAcidGuarantees in 1.2940 sec 2024-12-16T17:58:29,183 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38367 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=115 2024-12-16T17:58:29,183 INFO [Time-limited test {}] client.HBaseAdmin$TableFuture(3751): Operation: CREATE, Table Name: default:TestAcidGuarantees, procId: 115 completed 2024-12-16T17:58:29,186 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x1a5bc453 to 127.0.0.1:49190 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@1ecb7b9 2024-12-16T17:58:29,239 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@167e9e5a, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-16T17:58:29,243 DEBUG [Time-limited test {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-16T17:58:29,246 INFO [RS-EventLoopGroup-3-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:35412, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-16T17:58:29,248 DEBUG [Time-limited test {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=MasterService, sasl=false 2024-12-16T17:58:29,249 INFO [RS-EventLoopGroup-1-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:56534, 
version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=MasterService 2024-12-16T17:58:29,252 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x329b4b28 to 127.0.0.1:49190 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@56ae73bd 2024-12-16T17:58:29,263 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@34c0ab7c, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-16T17:58:29,264 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x198eb3f8 to 127.0.0.1:49190 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@3c1808ef 2024-12-16T17:58:29,277 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@db2df5b, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-16T17:58:29,277 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x339e38e9 to 127.0.0.1:49190 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@4f3d1105 2024-12-16T17:58:29,287 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@54ee47be, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-16T17:58:29,288 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x53df282d to 127.0.0.1:49190 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@744dae6a 2024-12-16T17:58:29,295 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@52b8c4ea, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-16T17:58:29,296 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x49e527c5 to 127.0.0.1:49190 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@50c2ee6d 2024-12-16T17:58:29,304 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@44c9ef0b, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-16T17:58:29,305 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x0f179203 to 127.0.0.1:49190 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@42d855d 2024-12-16T17:58:29,312 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@7fe5e338, compressor=null, tcpKeepAlive=true, 
tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-16T17:58:29,313 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x0992ece1 to 127.0.0.1:49190 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@61fd97e3 2024-12-16T17:58:29,320 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@315154c7, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-16T17:58:29,321 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x22a055db to 127.0.0.1:49190 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@2fef9ce3 2024-12-16T17:58:29,328 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@142302b5, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-16T17:58:29,329 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x523a2789 to 127.0.0.1:49190 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@161a1a9 2024-12-16T17:58:29,337 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@d5b3d9a, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-16T17:58:29,337 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x59494c51 to 127.0.0.1:49190 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@5529c238 2024-12-16T17:58:29,345 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@14a92ea, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-16T17:58:29,347 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38367 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-12-16T17:58:29,347 DEBUG [hconnection-0x17fbeb89-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-16T17:58:29,347 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38367 {}] procedure2.ProcedureExecutor(1098): Stored pid=118, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=118, table=TestAcidGuarantees 2024-12-16T17:58:29,348 INFO [RS-EventLoopGroup-3-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:35418, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-16T17:58:29,348 DEBUG [hconnection-0x6c2f3258-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 
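The "Client=jenkins//172.17.0.2 flush TestAcidGuarantees" entry and the stored FlushTableProcedure (pid=118) above correspond to a client-side administrative flush request. A minimal sketch of that call, assuming the standard HBase 2.x Admin API (class name and configuration are illustrative, not taken from the test):

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;

    public class FlushTestTable {
        public static void main(String[] args) throws Exception {
            Configuration conf = HBaseConfiguration.create();
            try (Connection conn = ConnectionFactory.createConnection(conf);
                 Admin admin = conn.getAdmin()) {
                // Asks the master to flush the table's regions; in the log this shows up
                // as FlushTableProcedure pid=118 dispatching a FlushRegionProcedure
                // (pid=119) to the region server holding f14d455ff3b60546f0a651dc8cf12d5c.
                admin.flush(TableName.valueOf("TestAcidGuarantees"));
            }
        }
    }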
2024-12-16T17:58:29,348 INFO [PEWorker-3 {}] procedure.FlushTableProcedure(91): pid=118, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=118, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-12-16T17:58:29,348 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38367 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=118 2024-12-16T17:58:29,348 DEBUG [hconnection-0x4c5aa4ee-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-16T17:58:29,349 INFO [PEWorker-3 {}] procedure.FlushTableProcedure(91): pid=118, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=118, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-12-16T17:58:29,349 DEBUG [hconnection-0x2f98c4cf-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-16T17:58:29,349 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=119, ppid=118, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-12-16T17:58:29,349 INFO [RS-EventLoopGroup-3-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:35424, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-16T17:58:29,349 INFO [RS-EventLoopGroup-3-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:35430, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-16T17:58:29,350 INFO [RS-EventLoopGroup-3-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:35444, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-16T17:58:29,350 DEBUG [hconnection-0x4746177f-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-16T17:58:29,351 INFO [RS-EventLoopGroup-3-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:35456, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-16T17:58:29,351 DEBUG [hconnection-0x3b17e29f-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-16T17:58:29,351 DEBUG [hconnection-0x41391b5d-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-16T17:58:29,352 INFO [RS-EventLoopGroup-3-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:35466, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-16T17:58:29,352 DEBUG [hconnection-0x6f6059ab-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-16T17:58:29,352 INFO [RS-EventLoopGroup-3-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:35480, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-16T17:58:29,353 INFO [RS-EventLoopGroup-3-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:35482, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-16T17:58:29,354 DEBUG [hconnection-0x147a5b94-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for 
service=ClientService, sasl=false 2024-12-16T17:58:29,355 INFO [RS-EventLoopGroup-3-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:35486, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-16T17:58:29,357 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39733 {}] regionserver.HRegion(8581): Flush requested on f14d455ff3b60546f0a651dc8cf12d5c 2024-12-16T17:58:29,357 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing f14d455ff3b60546f0a651dc8cf12d5c 3/3 column families, dataSize=53.67 KB heapSize=141.38 KB 2024-12-16T17:58:29,357 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK f14d455ff3b60546f0a651dc8cf12d5c, store=A 2024-12-16T17:58:29,357 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-16T17:58:29,357 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK f14d455ff3b60546f0a651dc8cf12d5c, store=B 2024-12-16T17:58:29,357 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-16T17:58:29,358 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK f14d455ff3b60546f0a651dc8cf12d5c, store=C 2024-12-16T17:58:29,358 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-16T17:58:29,359 DEBUG [hconnection-0x74c1324d-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-16T17:58:29,361 INFO [RS-EventLoopGroup-3-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:35488, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-16T17:58:29,368 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39733 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f14d455ff3b60546f0a651dc8cf12d5c, server=3609ad07831c,39733,1734371789085 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-16T17:58:29,368 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39733 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f14d455ff3b60546f0a651dc8cf12d5c, server=3609ad07831c,39733,1734371789085 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-16T17:58:29,368 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39733 {}] ipc.CallRunner(138): callId: 9 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35482 deadline: 1734371969367, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f14d455ff3b60546f0a651dc8cf12d5c, server=3609ad07831c,39733,1734371789085 2024-12-16T17:58:29,368 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39733 {}] ipc.CallRunner(138): callId: 9 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35480 deadline: 1734371969367, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f14d455ff3b60546f0a651dc8cf12d5c, server=3609ad07831c,39733,1734371789085 2024-12-16T17:58:29,368 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39733 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f14d455ff3b60546f0a651dc8cf12d5c, server=3609ad07831c,39733,1734371789085 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-16T17:58:29,368 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39733 {}] ipc.CallRunner(138): callId: 5 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35488 deadline: 1734371969367, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f14d455ff3b60546f0a651dc8cf12d5c, server=3609ad07831c,39733,1734371789085 2024-12-16T17:58:29,368 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39733 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f14d455ff3b60546f0a651dc8cf12d5c, server=3609ad07831c,39733,1734371789085 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-16T17:58:29,368 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39733 {}] ipc.CallRunner(138): callId: 11 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35424 deadline: 1734371969368, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f14d455ff3b60546f0a651dc8cf12d5c, server=3609ad07831c,39733,1734371789085 2024-12-16T17:58:29,370 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39733 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f14d455ff3b60546f0a651dc8cf12d5c, server=3609ad07831c,39733,1734371789085 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-16T17:58:29,370 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39733 {}] ipc.CallRunner(138): callId: 7 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35486 deadline: 1734371969369, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f14d455ff3b60546f0a651dc8cf12d5c, server=3609ad07831c,39733,1734371789085 2024-12-16T17:58:29,381 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/f14d455ff3b60546f0a651dc8cf12d5c/.tmp/A/842193dbac8b42018e5fcf9924469536 is 50, key is test_row_0/A:col10/1734371909353/Put/seqid=0 2024-12-16T17:58:29,389 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41817 is added to blk_1073742277_1453 (size=14341) 2024-12-16T17:58:29,449 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38367 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=118 2024-12-16T17:58:29,472 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39733 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f14d455ff3b60546f0a651dc8cf12d5c, server=3609ad07831c,39733,1734371789085 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-16T17:58:29,473 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39733 {}] ipc.CallRunner(138): callId: 7 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35488 deadline: 1734371969469, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f14d455ff3b60546f0a651dc8cf12d5c, server=3609ad07831c,39733,1734371789085 2024-12-16T17:58:29,473 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39733 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f14d455ff3b60546f0a651dc8cf12d5c, server=3609ad07831c,39733,1734371789085 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-16T17:58:29,473 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39733 {}] ipc.CallRunner(138): callId: 13 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35424 deadline: 1734371969469, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f14d455ff3b60546f0a651dc8cf12d5c, server=3609ad07831c,39733,1734371789085 2024-12-16T17:58:29,473 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39733 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f14d455ff3b60546f0a651dc8cf12d5c, server=3609ad07831c,39733,1734371789085 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-16T17:58:29,473 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39733 {}] ipc.CallRunner(138): callId: 11 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35480 deadline: 1734371969469, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f14d455ff3b60546f0a651dc8cf12d5c, server=3609ad07831c,39733,1734371789085 2024-12-16T17:58:29,473 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39733 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f14d455ff3b60546f0a651dc8cf12d5c, server=3609ad07831c,39733,1734371789085 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-16T17:58:29,473 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39733 {}] ipc.CallRunner(138): callId: 11 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35482 deadline: 1734371969469, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f14d455ff3b60546f0a651dc8cf12d5c, server=3609ad07831c,39733,1734371789085 2024-12-16T17:58:29,473 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39733 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f14d455ff3b60546f0a651dc8cf12d5c, server=3609ad07831c,39733,1734371789085 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-16T17:58:29,473 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39733 {}] ipc.CallRunner(138): callId: 9 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35486 deadline: 1734371969470, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f14d455ff3b60546f0a651dc8cf12d5c, server=3609ad07831c,39733,1734371789085 2024-12-16T17:58:29,501 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 3609ad07831c,39733,1734371789085 2024-12-16T17:58:29,501 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=39733 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=119 2024-12-16T17:58:29,501 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-0 {event_type=RS_FLUSH_REGIONS, pid=119}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1734371907068.f14d455ff3b60546f0a651dc8cf12d5c. 2024-12-16T17:58:29,501 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-0 {event_type=RS_FLUSH_REGIONS, pid=119}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1734371907068.f14d455ff3b60546f0a651dc8cf12d5c. as already flushing 2024-12-16T17:58:29,501 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-0 {event_type=RS_FLUSH_REGIONS, pid=119}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1734371907068.f14d455ff3b60546f0a651dc8cf12d5c. 2024-12-16T17:58:29,501 ERROR [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-0 {event_type=RS_FLUSH_REGIONS, pid=119}] handler.RSProcedureHandler(58): pid=119 java.io.IOException: Unable to complete flush {ENCODED => f14d455ff3b60546f0a651dc8cf12d5c, NAME => 'TestAcidGuarantees,,1734371907068.f14d455ff3b60546f0a651dc8cf12d5c.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-16T17:58:29,501 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-0 {event_type=RS_FLUSH_REGIONS, pid=119}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=119 java.io.IOException: Unable to complete flush {ENCODED => f14d455ff3b60546f0a651dc8cf12d5c, NAME => 'TestAcidGuarantees,,1734371907068.f14d455ff3b60546f0a651dc8cf12d5c.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-16T17:58:29,502 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38367 {}] master.HMaster(4114): Remote procedure failed, pid=119 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => f14d455ff3b60546f0a651dc8cf12d5c, NAME => 'TestAcidGuarantees,,1734371907068.f14d455ff3b60546f0a651dc8cf12d5c.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => f14d455ff3b60546f0a651dc8cf12d5c, NAME => 'TestAcidGuarantees,,1734371907068.f14d455ff3b60546f0a651dc8cf12d5c.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-16T17:58:29,649 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38367 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=118 2024-12-16T17:58:29,653 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 3609ad07831c,39733,1734371789085 2024-12-16T17:58:29,653 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=39733 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=119 2024-12-16T17:58:29,653 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-1 {event_type=RS_FLUSH_REGIONS, pid=119}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1734371907068.f14d455ff3b60546f0a651dc8cf12d5c. 2024-12-16T17:58:29,654 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-1 {event_type=RS_FLUSH_REGIONS, pid=119}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1734371907068.f14d455ff3b60546f0a651dc8cf12d5c. as already flushing 2024-12-16T17:58:29,654 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-1 {event_type=RS_FLUSH_REGIONS, pid=119}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1734371907068.f14d455ff3b60546f0a651dc8cf12d5c. 2024-12-16T17:58:29,654 ERROR [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-1 {event_type=RS_FLUSH_REGIONS, pid=119}] handler.RSProcedureHandler(58): pid=119 java.io.IOException: Unable to complete flush {ENCODED => f14d455ff3b60546f0a651dc8cf12d5c, NAME => 'TestAcidGuarantees,,1734371907068.f14d455ff3b60546f0a651dc8cf12d5c.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-16T17:58:29,654 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-1 {event_type=RS_FLUSH_REGIONS, pid=119}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=119 java.io.IOException: Unable to complete flush {ENCODED => f14d455ff3b60546f0a651dc8cf12d5c, NAME => 'TestAcidGuarantees,,1734371907068.f14d455ff3b60546f0a651dc8cf12d5c.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-16T17:58:29,654 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38367 {}] master.HMaster(4114): Remote procedure failed, pid=119 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => f14d455ff3b60546f0a651dc8cf12d5c, NAME => 'TestAcidGuarantees,,1734371907068.f14d455ff3b60546f0a651dc8cf12d5c.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => f14d455ff3b60546f0a651dc8cf12d5c, NAME => 'TestAcidGuarantees,,1734371907068.f14d455ff3b60546f0a651dc8cf12d5c.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-16T17:58:29,676 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39733 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f14d455ff3b60546f0a651dc8cf12d5c, server=3609ad07831c,39733,1734371789085 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-16T17:58:29,676 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39733 {}] ipc.CallRunner(138): callId: 9 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35488 deadline: 1734371969673, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f14d455ff3b60546f0a651dc8cf12d5c, server=3609ad07831c,39733,1734371789085 2024-12-16T17:58:29,676 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39733 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f14d455ff3b60546f0a651dc8cf12d5c, server=3609ad07831c,39733,1734371789085 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-16T17:58:29,676 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39733 {}] ipc.CallRunner(138): callId: 13 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35480 deadline: 1734371969674, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f14d455ff3b60546f0a651dc8cf12d5c, server=3609ad07831c,39733,1734371789085 2024-12-16T17:58:29,676 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39733 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f14d455ff3b60546f0a651dc8cf12d5c, server=3609ad07831c,39733,1734371789085 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-16T17:58:29,677 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39733 {}] ipc.CallRunner(138): callId: 15 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35424 deadline: 1734371969675, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f14d455ff3b60546f0a651dc8cf12d5c, server=3609ad07831c,39733,1734371789085 2024-12-16T17:58:29,677 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39733 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f14d455ff3b60546f0a651dc8cf12d5c, server=3609ad07831c,39733,1734371789085 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-16T17:58:29,677 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39733 {}] ipc.CallRunner(138): callId: 13 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35482 deadline: 1734371969675, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f14d455ff3b60546f0a651dc8cf12d5c, server=3609ad07831c,39733,1734371789085 2024-12-16T17:58:29,677 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39733 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f14d455ff3b60546f0a651dc8cf12d5c, server=3609ad07831c,39733,1734371789085 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-16T17:58:29,677 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39733 {}] ipc.CallRunner(138): callId: 11 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35486 deadline: 1734371969675, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f14d455ff3b60546f0a651dc8cf12d5c, server=3609ad07831c,39733,1734371789085 2024-12-16T17:58:29,790 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=20.13 KB at sequenceid=13 (bloomFilter=true), to=hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/f14d455ff3b60546f0a651dc8cf12d5c/.tmp/A/842193dbac8b42018e5fcf9924469536 2024-12-16T17:58:29,805 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 3609ad07831c,39733,1734371789085 2024-12-16T17:58:29,806 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=39733 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=119 2024-12-16T17:58:29,806 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-2 {event_type=RS_FLUSH_REGIONS, pid=119}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1734371907068.f14d455ff3b60546f0a651dc8cf12d5c. 2024-12-16T17:58:29,806 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-2 {event_type=RS_FLUSH_REGIONS, pid=119}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1734371907068.f14d455ff3b60546f0a651dc8cf12d5c. as already flushing 2024-12-16T17:58:29,806 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-2 {event_type=RS_FLUSH_REGIONS, pid=119}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1734371907068.f14d455ff3b60546f0a651dc8cf12d5c. 
2024-12-16T17:58:29,806 ERROR [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-2 {event_type=RS_FLUSH_REGIONS, pid=119}] handler.RSProcedureHandler(58): pid=119 java.io.IOException: Unable to complete flush {ENCODED => f14d455ff3b60546f0a651dc8cf12d5c, NAME => 'TestAcidGuarantees,,1734371907068.f14d455ff3b60546f0a651dc8cf12d5c.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-16T17:58:29,806 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-2 {event_type=RS_FLUSH_REGIONS, pid=119}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=119 java.io.IOException: Unable to complete flush {ENCODED => f14d455ff3b60546f0a651dc8cf12d5c, NAME => 'TestAcidGuarantees,,1734371907068.f14d455ff3b60546f0a651dc8cf12d5c.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-16T17:58:29,807 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38367 {}] master.HMaster(4114): Remote procedure failed, pid=119 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => f14d455ff3b60546f0a651dc8cf12d5c, NAME => 'TestAcidGuarantees,,1734371907068.f14d455ff3b60546f0a651dc8cf12d5c.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => f14d455ff3b60546f0a651dc8cf12d5c, NAME => 'TestAcidGuarantees,,1734371907068.f14d455ff3b60546f0a651dc8cf12d5c.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-16T17:58:29,809 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/f14d455ff3b60546f0a651dc8cf12d5c/.tmp/B/cf8991069ad04d12b36cfa5c2cf08c15 is 50, key is test_row_0/B:col10/1734371909353/Put/seqid=0 2024-12-16T17:58:29,812 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41817 is added to blk_1073742278_1454 (size=12001) 2024-12-16T17:58:29,950 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38367 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=118 2024-12-16T17:58:29,958 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 3609ad07831c,39733,1734371789085 2024-12-16T17:58:29,958 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=39733 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=119 2024-12-16T17:58:29,958 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-0 {event_type=RS_FLUSH_REGIONS, pid=119}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1734371907068.f14d455ff3b60546f0a651dc8cf12d5c. 2024-12-16T17:58:29,958 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-0 {event_type=RS_FLUSH_REGIONS, pid=119}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1734371907068.f14d455ff3b60546f0a651dc8cf12d5c. as already flushing 2024-12-16T17:58:29,959 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-0 {event_type=RS_FLUSH_REGIONS, pid=119}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1734371907068.f14d455ff3b60546f0a651dc8cf12d5c. 
2024-12-16T17:58:29,959 ERROR [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-0 {event_type=RS_FLUSH_REGIONS, pid=119}] handler.RSProcedureHandler(58): pid=119 java.io.IOException: Unable to complete flush {ENCODED => f14d455ff3b60546f0a651dc8cf12d5c, NAME => 'TestAcidGuarantees,,1734371907068.f14d455ff3b60546f0a651dc8cf12d5c.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-16T17:58:29,959 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-0 {event_type=RS_FLUSH_REGIONS, pid=119}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=119 java.io.IOException: Unable to complete flush {ENCODED => f14d455ff3b60546f0a651dc8cf12d5c, NAME => 'TestAcidGuarantees,,1734371907068.f14d455ff3b60546f0a651dc8cf12d5c.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-16T17:58:29,959 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38367 {}] master.HMaster(4114): Remote procedure failed, pid=119 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => f14d455ff3b60546f0a651dc8cf12d5c, NAME => 'TestAcidGuarantees,,1734371907068.f14d455ff3b60546f0a651dc8cf12d5c.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => f14d455ff3b60546f0a651dc8cf12d5c, NAME => 'TestAcidGuarantees,,1734371907068.f14d455ff3b60546f0a651dc8cf12d5c.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-16T17:58:29,978 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39733 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f14d455ff3b60546f0a651dc8cf12d5c, server=3609ad07831c,39733,1734371789085 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-16T17:58:29,978 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39733 {}] ipc.CallRunner(138): callId: 15 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35480 deadline: 1734371969977, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f14d455ff3b60546f0a651dc8cf12d5c, server=3609ad07831c,39733,1734371789085 2024-12-16T17:58:29,978 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39733 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f14d455ff3b60546f0a651dc8cf12d5c, server=3609ad07831c,39733,1734371789085 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-16T17:58:29,978 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39733 {}] ipc.CallRunner(138): callId: 17 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35424 deadline: 1734371969977, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f14d455ff3b60546f0a651dc8cf12d5c, server=3609ad07831c,39733,1734371789085 2024-12-16T17:58:29,982 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39733 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f14d455ff3b60546f0a651dc8cf12d5c, server=3609ad07831c,39733,1734371789085 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-16T17:58:29,982 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39733 {}] ipc.CallRunner(138): callId: 13 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35486 deadline: 1734371969978, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f14d455ff3b60546f0a651dc8cf12d5c, server=3609ad07831c,39733,1734371789085 2024-12-16T17:58:29,982 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39733 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f14d455ff3b60546f0a651dc8cf12d5c, server=3609ad07831c,39733,1734371789085 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-16T17:58:29,982 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39733 {}] ipc.CallRunner(138): callId: 15 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35482 deadline: 1734371969978, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f14d455ff3b60546f0a651dc8cf12d5c, server=3609ad07831c,39733,1734371789085 2024-12-16T17:58:29,982 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39733 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f14d455ff3b60546f0a651dc8cf12d5c, server=3609ad07831c,39733,1734371789085 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-16T17:58:29,982 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39733 {}] ipc.CallRunner(138): callId: 11 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35488 deadline: 1734371969979, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f14d455ff3b60546f0a651dc8cf12d5c, server=3609ad07831c,39733,1734371789085 2024-12-16T17:58:30,110 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 3609ad07831c,39733,1734371789085 2024-12-16T17:58:30,111 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=39733 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=119 2024-12-16T17:58:30,111 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-1 {event_type=RS_FLUSH_REGIONS, pid=119}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1734371907068.f14d455ff3b60546f0a651dc8cf12d5c. 2024-12-16T17:58:30,111 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-1 {event_type=RS_FLUSH_REGIONS, pid=119}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1734371907068.f14d455ff3b60546f0a651dc8cf12d5c. as already flushing 2024-12-16T17:58:30,111 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-1 {event_type=RS_FLUSH_REGIONS, pid=119}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1734371907068.f14d455ff3b60546f0a651dc8cf12d5c. 2024-12-16T17:58:30,111 ERROR [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-1 {event_type=RS_FLUSH_REGIONS, pid=119}] handler.RSProcedureHandler(58): pid=119 java.io.IOException: Unable to complete flush {ENCODED => f14d455ff3b60546f0a651dc8cf12d5c, NAME => 'TestAcidGuarantees,,1734371907068.f14d455ff3b60546f0a651dc8cf12d5c.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-16T17:58:30,111 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-1 {event_type=RS_FLUSH_REGIONS, pid=119}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=119 java.io.IOException: Unable to complete flush {ENCODED => f14d455ff3b60546f0a651dc8cf12d5c, NAME => 'TestAcidGuarantees,,1734371907068.f14d455ff3b60546f0a651dc8cf12d5c.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-16T17:58:30,111 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38367 {}] master.HMaster(4114): Remote procedure failed, pid=119 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => f14d455ff3b60546f0a651dc8cf12d5c, NAME => 'TestAcidGuarantees,,1734371907068.f14d455ff3b60546f0a651dc8cf12d5c.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => f14d455ff3b60546f0a651dc8cf12d5c, NAME => 'TestAcidGuarantees,,1734371907068.f14d455ff3b60546f0a651dc8cf12d5c.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-16T17:58:30,212 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=20.13 KB at sequenceid=13 (bloomFilter=true), to=hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/f14d455ff3b60546f0a651dc8cf12d5c/.tmp/B/cf8991069ad04d12b36cfa5c2cf08c15 2024-12-16T17:58:30,228 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/f14d455ff3b60546f0a651dc8cf12d5c/.tmp/C/52a65fa7bce146b685e460f3821bcf78 is 50, key is test_row_0/C:col10/1734371909353/Put/seqid=0 2024-12-16T17:58:30,232 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41817 is added to blk_1073742279_1455 (size=12001) 2024-12-16T17:58:30,262 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 3609ad07831c,39733,1734371789085 2024-12-16T17:58:30,263 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=39733 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=119 2024-12-16T17:58:30,263 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-2 {event_type=RS_FLUSH_REGIONS, pid=119}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1734371907068.f14d455ff3b60546f0a651dc8cf12d5c. 2024-12-16T17:58:30,263 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-2 {event_type=RS_FLUSH_REGIONS, pid=119}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1734371907068.f14d455ff3b60546f0a651dc8cf12d5c. 
as already flushing 2024-12-16T17:58:30,263 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-2 {event_type=RS_FLUSH_REGIONS, pid=119}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1734371907068.f14d455ff3b60546f0a651dc8cf12d5c. 2024-12-16T17:58:30,263 ERROR [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-2 {event_type=RS_FLUSH_REGIONS, pid=119}] handler.RSProcedureHandler(58): pid=119 java.io.IOException: Unable to complete flush {ENCODED => f14d455ff3b60546f0a651dc8cf12d5c, NAME => 'TestAcidGuarantees,,1734371907068.f14d455ff3b60546f0a651dc8cf12d5c.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-16T17:58:30,263 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-2 {event_type=RS_FLUSH_REGIONS, pid=119}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=119 java.io.IOException: Unable to complete flush {ENCODED => f14d455ff3b60546f0a651dc8cf12d5c, NAME => 'TestAcidGuarantees,,1734371907068.f14d455ff3b60546f0a651dc8cf12d5c.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-16T17:58:30,264 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38367 {}] master.HMaster(4114): Remote procedure failed, pid=119 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => f14d455ff3b60546f0a651dc8cf12d5c, NAME => 'TestAcidGuarantees,,1734371907068.f14d455ff3b60546f0a651dc8cf12d5c.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] 
at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => f14d455ff3b60546f0a651dc8cf12d5c, NAME => 'TestAcidGuarantees,,1734371907068.f14d455ff3b60546f0a651dc8cf12d5c.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-16T17:58:30,415 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 3609ad07831c,39733,1734371789085 2024-12-16T17:58:30,415 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=39733 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=119 2024-12-16T17:58:30,415 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-0 {event_type=RS_FLUSH_REGIONS, pid=119}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1734371907068.f14d455ff3b60546f0a651dc8cf12d5c. 2024-12-16T17:58:30,415 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-0 {event_type=RS_FLUSH_REGIONS, pid=119}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1734371907068.f14d455ff3b60546f0a651dc8cf12d5c. as already flushing 2024-12-16T17:58:30,415 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-0 {event_type=RS_FLUSH_REGIONS, pid=119}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1734371907068.f14d455ff3b60546f0a651dc8cf12d5c. 2024-12-16T17:58:30,415 ERROR [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-0 {event_type=RS_FLUSH_REGIONS, pid=119}] handler.RSProcedureHandler(58): pid=119 java.io.IOException: Unable to complete flush {ENCODED => f14d455ff3b60546f0a651dc8cf12d5c, NAME => 'TestAcidGuarantees,,1734371907068.f14d455ff3b60546f0a651dc8cf12d5c.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-16T17:58:30,415 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-0 {event_type=RS_FLUSH_REGIONS, pid=119}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=119 java.io.IOException: Unable to complete flush {ENCODED => f14d455ff3b60546f0a651dc8cf12d5c, NAME => 'TestAcidGuarantees,,1734371907068.f14d455ff3b60546f0a651dc8cf12d5c.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-16T17:58:30,416 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38367 {}] master.HMaster(4114): Remote procedure failed, pid=119 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => f14d455ff3b60546f0a651dc8cf12d5c, NAME => 'TestAcidGuarantees,,1734371907068.f14d455ff3b60546f0a651dc8cf12d5c.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => f14d455ff3b60546f0a651dc8cf12d5c, NAME => 'TestAcidGuarantees,,1734371907068.f14d455ff3b60546f0a651dc8cf12d5c.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-16T17:58:30,451 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38367 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=118 2024-12-16T17:58:30,481 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39733 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f14d455ff3b60546f0a651dc8cf12d5c, server=3609ad07831c,39733,1734371789085 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-16T17:58:30,482 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39733 {}] ipc.CallRunner(138): callId: 19 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35424 deadline: 1734371970480, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f14d455ff3b60546f0a651dc8cf12d5c, server=3609ad07831c,39733,1734371789085 2024-12-16T17:58:30,485 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39733 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f14d455ff3b60546f0a651dc8cf12d5c, server=3609ad07831c,39733,1734371789085 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-16T17:58:30,485 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39733 {}] ipc.CallRunner(138): callId: 17 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35480 deadline: 1734371970483, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f14d455ff3b60546f0a651dc8cf12d5c, server=3609ad07831c,39733,1734371789085 2024-12-16T17:58:30,485 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39733 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f14d455ff3b60546f0a651dc8cf12d5c, server=3609ad07831c,39733,1734371789085 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-16T17:58:30,485 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39733 {}] ipc.CallRunner(138): callId: 17 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35482 deadline: 1734371970483, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f14d455ff3b60546f0a651dc8cf12d5c, server=3609ad07831c,39733,1734371789085 2024-12-16T17:58:30,487 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39733 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f14d455ff3b60546f0a651dc8cf12d5c, server=3609ad07831c,39733,1734371789085 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-16T17:58:30,487 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39733 {}] ipc.CallRunner(138): callId: 13 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35488 deadline: 1734371970485, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f14d455ff3b60546f0a651dc8cf12d5c, server=3609ad07831c,39733,1734371789085 2024-12-16T17:58:30,488 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39733 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f14d455ff3b60546f0a651dc8cf12d5c, server=3609ad07831c,39733,1734371789085 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-16T17:58:30,488 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39733 {}] ipc.CallRunner(138): callId: 15 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35486 deadline: 1734371970486, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f14d455ff3b60546f0a651dc8cf12d5c, server=3609ad07831c,39733,1734371789085 2024-12-16T17:58:30,567 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 3609ad07831c,39733,1734371789085 2024-12-16T17:58:30,567 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=39733 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=119 2024-12-16T17:58:30,568 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-1 {event_type=RS_FLUSH_REGIONS, pid=119}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1734371907068.f14d455ff3b60546f0a651dc8cf12d5c. 2024-12-16T17:58:30,568 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-1 {event_type=RS_FLUSH_REGIONS, pid=119}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1734371907068.f14d455ff3b60546f0a651dc8cf12d5c. as already flushing 2024-12-16T17:58:30,568 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-1 {event_type=RS_FLUSH_REGIONS, pid=119}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1734371907068.f14d455ff3b60546f0a651dc8cf12d5c. 2024-12-16T17:58:30,568 ERROR [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-1 {event_type=RS_FLUSH_REGIONS, pid=119}] handler.RSProcedureHandler(58): pid=119 java.io.IOException: Unable to complete flush {ENCODED => f14d455ff3b60546f0a651dc8cf12d5c, NAME => 'TestAcidGuarantees,,1734371907068.f14d455ff3b60546f0a651dc8cf12d5c.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-16T17:58:30,568 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-1 {event_type=RS_FLUSH_REGIONS, pid=119}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=119 java.io.IOException: Unable to complete flush {ENCODED => f14d455ff3b60546f0a651dc8cf12d5c, NAME => 'TestAcidGuarantees,,1734371907068.f14d455ff3b60546f0a651dc8cf12d5c.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-16T17:58:30,568 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38367 {}] master.HMaster(4114): Remote procedure failed, pid=119 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => f14d455ff3b60546f0a651dc8cf12d5c, NAME => 'TestAcidGuarantees,,1734371907068.f14d455ff3b60546f0a651dc8cf12d5c.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => f14d455ff3b60546f0a651dc8cf12d5c, NAME => 'TestAcidGuarantees,,1734371907068.f14d455ff3b60546f0a651dc8cf12d5c.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-16T17:58:30,633 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=20.13 KB at sequenceid=13 (bloomFilter=true), to=hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/f14d455ff3b60546f0a651dc8cf12d5c/.tmp/C/52a65fa7bce146b685e460f3821bcf78 2024-12-16T17:58:30,636 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/f14d455ff3b60546f0a651dc8cf12d5c/.tmp/A/842193dbac8b42018e5fcf9924469536 as hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/f14d455ff3b60546f0a651dc8cf12d5c/A/842193dbac8b42018e5fcf9924469536 2024-12-16T17:58:30,639 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/f14d455ff3b60546f0a651dc8cf12d5c/A/842193dbac8b42018e5fcf9924469536, entries=200, sequenceid=13, filesize=14.0 K 2024-12-16T17:58:30,640 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/f14d455ff3b60546f0a651dc8cf12d5c/.tmp/B/cf8991069ad04d12b36cfa5c2cf08c15 as hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/f14d455ff3b60546f0a651dc8cf12d5c/B/cf8991069ad04d12b36cfa5c2cf08c15 2024-12-16T17:58:30,642 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/f14d455ff3b60546f0a651dc8cf12d5c/B/cf8991069ad04d12b36cfa5c2cf08c15, entries=150, sequenceid=13, 
filesize=11.7 K 2024-12-16T17:58:30,643 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/f14d455ff3b60546f0a651dc8cf12d5c/.tmp/C/52a65fa7bce146b685e460f3821bcf78 as hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/f14d455ff3b60546f0a651dc8cf12d5c/C/52a65fa7bce146b685e460f3821bcf78 2024-12-16T17:58:30,646 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/f14d455ff3b60546f0a651dc8cf12d5c/C/52a65fa7bce146b685e460f3821bcf78, entries=150, sequenceid=13, filesize=11.7 K 2024-12-16T17:58:30,646 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~60.38 KB/61830, heapSize ~158.91 KB/162720, currentSize=147.60 KB/151140 for f14d455ff3b60546f0a651dc8cf12d5c in 1289ms, sequenceid=13, compaction requested=false 2024-12-16T17:58:30,646 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for f14d455ff3b60546f0a651dc8cf12d5c: 2024-12-16T17:58:30,720 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 3609ad07831c,39733,1734371789085 2024-12-16T17:58:30,720 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=39733 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=119 2024-12-16T17:58:30,720 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-2 {event_type=RS_FLUSH_REGIONS, pid=119}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1734371907068.f14d455ff3b60546f0a651dc8cf12d5c. 
2024-12-16T17:58:30,720 INFO [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-2 {event_type=RS_FLUSH_REGIONS, pid=119}] regionserver.HRegion(2837): Flushing f14d455ff3b60546f0a651dc8cf12d5c 3/3 column families, dataSize=147.60 KB heapSize=387.47 KB 2024-12-16T17:58:30,720 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-2 {event_type=RS_FLUSH_REGIONS, pid=119}] regionserver.CompactingMemStore(205): FLUSHING TO DISK f14d455ff3b60546f0a651dc8cf12d5c, store=A 2024-12-16T17:58:30,721 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-2 {event_type=RS_FLUSH_REGIONS, pid=119}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-16T17:58:30,721 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-2 {event_type=RS_FLUSH_REGIONS, pid=119}] regionserver.CompactingMemStore(205): FLUSHING TO DISK f14d455ff3b60546f0a651dc8cf12d5c, store=B 2024-12-16T17:58:30,721 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-2 {event_type=RS_FLUSH_REGIONS, pid=119}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-16T17:58:30,721 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-2 {event_type=RS_FLUSH_REGIONS, pid=119}] regionserver.CompactingMemStore(205): FLUSHING TO DISK f14d455ff3b60546f0a651dc8cf12d5c, store=C 2024-12-16T17:58:30,721 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-2 {event_type=RS_FLUSH_REGIONS, pid=119}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-16T17:58:30,724 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-2 {event_type=RS_FLUSH_REGIONS, pid=119}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/f14d455ff3b60546f0a651dc8cf12d5c/.tmp/A/2b538a89aa2b4dfc988362f0e699d81f is 50, key is test_row_0/A:col10/1734371909367/Put/seqid=0 2024-12-16T17:58:30,727 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41817 is added to blk_1073742280_1456 (size=12001) 2024-12-16T17:58:31,128 INFO [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-2 {event_type=RS_FLUSH_REGIONS, pid=119}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=49.20 KB at sequenceid=38 (bloomFilter=true), to=hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/f14d455ff3b60546f0a651dc8cf12d5c/.tmp/A/2b538a89aa2b4dfc988362f0e699d81f 2024-12-16T17:58:31,134 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-2 {event_type=RS_FLUSH_REGIONS, pid=119}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/f14d455ff3b60546f0a651dc8cf12d5c/.tmp/B/9808bc9bd8024435b3b678911af5d4b0 is 50, key is test_row_0/B:col10/1734371909367/Put/seqid=0 2024-12-16T17:58:31,138 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41817 is added to blk_1073742281_1457 (size=12001) 2024-12-16T17:58:31,138 INFO [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-2 {event_type=RS_FLUSH_REGIONS, pid=119}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=49.20 KB at sequenceid=38 (bloomFilter=true), 
to=hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/f14d455ff3b60546f0a651dc8cf12d5c/.tmp/B/9808bc9bd8024435b3b678911af5d4b0 2024-12-16T17:58:31,146 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-2 {event_type=RS_FLUSH_REGIONS, pid=119}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/f14d455ff3b60546f0a651dc8cf12d5c/.tmp/C/91c5d4a349f142c39f850ee27741357f is 50, key is test_row_0/C:col10/1734371909367/Put/seqid=0 2024-12-16T17:58:31,150 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41817 is added to blk_1073742282_1458 (size=12001) 2024-12-16T17:58:31,451 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38367 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=118 2024-12-16T17:58:31,489 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1734371907068.f14d455ff3b60546f0a651dc8cf12d5c. as already flushing 2024-12-16T17:58:31,489 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39733 {}] regionserver.HRegion(8581): Flush requested on f14d455ff3b60546f0a651dc8cf12d5c 2024-12-16T17:58:31,498 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39733 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f14d455ff3b60546f0a651dc8cf12d5c, server=3609ad07831c,39733,1734371789085 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-16T17:58:31,499 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39733 {}] ipc.CallRunner(138): callId: 21 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35480 deadline: 1734371971494, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f14d455ff3b60546f0a651dc8cf12d5c, server=3609ad07831c,39733,1734371789085 2024-12-16T17:58:31,500 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39733 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f14d455ff3b60546f0a651dc8cf12d5c, server=3609ad07831c,39733,1734371789085 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-16T17:58:31,500 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39733 {}] ipc.CallRunner(138): callId: 16 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35488 deadline: 1734371971495, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f14d455ff3b60546f0a651dc8cf12d5c, server=3609ad07831c,39733,1734371789085 2024-12-16T17:58:31,500 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39733 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f14d455ff3b60546f0a651dc8cf12d5c, server=3609ad07831c,39733,1734371789085 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-16T17:58:31,501 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39733 {}] ipc.CallRunner(138): callId: 18 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35486 deadline: 1734371971496, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f14d455ff3b60546f0a651dc8cf12d5c, server=3609ad07831c,39733,1734371789085 2024-12-16T17:58:31,501 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39733 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f14d455ff3b60546f0a651dc8cf12d5c, server=3609ad07831c,39733,1734371789085 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-16T17:58:31,501 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39733 {}] ipc.CallRunner(138): callId: 20 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35482 deadline: 1734371971498, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f14d455ff3b60546f0a651dc8cf12d5c, server=3609ad07831c,39733,1734371789085 2024-12-16T17:58:31,504 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39733 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f14d455ff3b60546f0a651dc8cf12d5c, server=3609ad07831c,39733,1734371789085 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-16T17:58:31,504 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39733 {}] ipc.CallRunner(138): callId: 24 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35424 deadline: 1734371971498, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f14d455ff3b60546f0a651dc8cf12d5c, server=3609ad07831c,39733,1734371789085 2024-12-16T17:58:31,551 INFO [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-2 {event_type=RS_FLUSH_REGIONS, pid=119}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=49.20 KB at sequenceid=38 (bloomFilter=true), to=hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/f14d455ff3b60546f0a651dc8cf12d5c/.tmp/C/91c5d4a349f142c39f850ee27741357f 2024-12-16T17:58:31,554 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-2 {event_type=RS_FLUSH_REGIONS, pid=119}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/f14d455ff3b60546f0a651dc8cf12d5c/.tmp/A/2b538a89aa2b4dfc988362f0e699d81f as hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/f14d455ff3b60546f0a651dc8cf12d5c/A/2b538a89aa2b4dfc988362f0e699d81f 2024-12-16T17:58:31,557 INFO [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-2 {event_type=RS_FLUSH_REGIONS, pid=119}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/f14d455ff3b60546f0a651dc8cf12d5c/A/2b538a89aa2b4dfc988362f0e699d81f, entries=150, sequenceid=38, filesize=11.7 K 2024-12-16T17:58:31,558 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-2 {event_type=RS_FLUSH_REGIONS, pid=119}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/f14d455ff3b60546f0a651dc8cf12d5c/.tmp/B/9808bc9bd8024435b3b678911af5d4b0 as hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/f14d455ff3b60546f0a651dc8cf12d5c/B/9808bc9bd8024435b3b678911af5d4b0 2024-12-16T17:58:31,562 INFO [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-2 {event_type=RS_FLUSH_REGIONS, pid=119}] regionserver.HStore$StoreFlusherImpl(1989): Added 
hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/f14d455ff3b60546f0a651dc8cf12d5c/B/9808bc9bd8024435b3b678911af5d4b0, entries=150, sequenceid=38, filesize=11.7 K 2024-12-16T17:58:31,562 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-2 {event_type=RS_FLUSH_REGIONS, pid=119}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/f14d455ff3b60546f0a651dc8cf12d5c/.tmp/C/91c5d4a349f142c39f850ee27741357f as hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/f14d455ff3b60546f0a651dc8cf12d5c/C/91c5d4a349f142c39f850ee27741357f 2024-12-16T17:58:31,566 INFO [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-2 {event_type=RS_FLUSH_REGIONS, pid=119}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/f14d455ff3b60546f0a651dc8cf12d5c/C/91c5d4a349f142c39f850ee27741357f, entries=150, sequenceid=38, filesize=11.7 K 2024-12-16T17:58:31,566 INFO [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-2 {event_type=RS_FLUSH_REGIONS, pid=119}] regionserver.HRegion(3040): Finished flush of dataSize ~147.60 KB/151140, heapSize ~387.42 KB/396720, currentSize=53.67 KB/54960 for f14d455ff3b60546f0a651dc8cf12d5c in 846ms, sequenceid=38, compaction requested=false 2024-12-16T17:58:31,567 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-2 {event_type=RS_FLUSH_REGIONS, pid=119}] regionserver.HRegion(2538): Flush status journal for f14d455ff3b60546f0a651dc8cf12d5c: 2024-12-16T17:58:31,567 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-2 {event_type=RS_FLUSH_REGIONS, pid=119}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1734371907068.f14d455ff3b60546f0a651dc8cf12d5c. 
2024-12-16T17:58:31,567 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-2 {event_type=RS_FLUSH_REGIONS, pid=119}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=119 2024-12-16T17:58:31,567 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38367 {}] master.HMaster(4106): Remote procedure done, pid=119 2024-12-16T17:58:31,569 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=119, resume processing ppid=118 2024-12-16T17:58:31,569 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=119, ppid=118, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 2.2190 sec 2024-12-16T17:58:31,570 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=118, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=118, table=TestAcidGuarantees in 2.2220 sec 2024-12-16T17:58:31,602 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39733 {}] regionserver.HRegion(8581): Flush requested on f14d455ff3b60546f0a651dc8cf12d5c 2024-12-16T17:58:31,602 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing f14d455ff3b60546f0a651dc8cf12d5c 3/3 column families, dataSize=60.38 KB heapSize=158.95 KB 2024-12-16T17:58:31,602 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK f14d455ff3b60546f0a651dc8cf12d5c, store=A 2024-12-16T17:58:31,602 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-16T17:58:31,602 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK f14d455ff3b60546f0a651dc8cf12d5c, store=B 2024-12-16T17:58:31,602 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-16T17:58:31,602 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK f14d455ff3b60546f0a651dc8cf12d5c, store=C 2024-12-16T17:58:31,603 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-16T17:58:31,606 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/f14d455ff3b60546f0a651dc8cf12d5c/.tmp/A/c3e8b2af92234f888643b88ca11e8a36 is 50, key is test_row_0/A:col10/1734371911498/Put/seqid=0 2024-12-16T17:58:31,610 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41817 is added to blk_1073742283_1459 (size=14341) 2024-12-16T17:58:31,610 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=20.13 KB at sequenceid=50 (bloomFilter=true), to=hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/f14d455ff3b60546f0a651dc8cf12d5c/.tmp/A/c3e8b2af92234f888643b88ca11e8a36 2024-12-16T17:58:31,617 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/f14d455ff3b60546f0a651dc8cf12d5c/.tmp/B/6f81c30f127d4c21a87aec9ce9e33636 is 50, key is test_row_0/B:col10/1734371911498/Put/seqid=0 2024-12-16T17:58:31,620 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41817 is added to 
blk_1073742284_1460 (size=12001) 2024-12-16T17:58:31,621 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=20.13 KB at sequenceid=50 (bloomFilter=true), to=hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/f14d455ff3b60546f0a651dc8cf12d5c/.tmp/B/6f81c30f127d4c21a87aec9ce9e33636 2024-12-16T17:58:31,626 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/f14d455ff3b60546f0a651dc8cf12d5c/.tmp/C/5098bc816b524042a7de236ea05f4b2e is 50, key is test_row_0/C:col10/1734371911498/Put/seqid=0 2024-12-16T17:58:31,633 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41817 is added to blk_1073742285_1461 (size=12001) 2024-12-16T17:58:31,634 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39733 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f14d455ff3b60546f0a651dc8cf12d5c, server=3609ad07831c,39733,1734371789085 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-16T17:58:31,634 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39733 {}] ipc.CallRunner(138): callId: 29 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35424 deadline: 1734371971626, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f14d455ff3b60546f0a651dc8cf12d5c, server=3609ad07831c,39733,1734371789085 2024-12-16T17:58:31,634 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39733 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f14d455ff3b60546f0a651dc8cf12d5c, server=3609ad07831c,39733,1734371789085 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-16T17:58:31,635 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39733 {}] ipc.CallRunner(138): callId: 26 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35482 deadline: 1734371971626, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f14d455ff3b60546f0a651dc8cf12d5c, server=3609ad07831c,39733,1734371789085 2024-12-16T17:58:31,635 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39733 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f14d455ff3b60546f0a651dc8cf12d5c, server=3609ad07831c,39733,1734371789085 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-16T17:58:31,635 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39733 {}] ipc.CallRunner(138): callId: 28 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35480 deadline: 1734371971628, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f14d455ff3b60546f0a651dc8cf12d5c, server=3609ad07831c,39733,1734371789085 2024-12-16T17:58:31,635 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39733 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f14d455ff3b60546f0a651dc8cf12d5c, server=3609ad07831c,39733,1734371789085 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-16T17:58:31,635 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39733 {}] ipc.CallRunner(138): callId: 23 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35488 deadline: 1734371971628, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f14d455ff3b60546f0a651dc8cf12d5c, server=3609ad07831c,39733,1734371789085 2024-12-16T17:58:31,636 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39733 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f14d455ff3b60546f0a651dc8cf12d5c, server=3609ad07831c,39733,1734371789085 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-16T17:58:31,636 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39733 {}] ipc.CallRunner(138): callId: 25 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35486 deadline: 1734371971629, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f14d455ff3b60546f0a651dc8cf12d5c, server=3609ad07831c,39733,1734371789085 2024-12-16T17:58:31,740 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39733 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f14d455ff3b60546f0a651dc8cf12d5c, server=3609ad07831c,39733,1734371789085 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-16T17:58:31,741 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39733 {}] ipc.CallRunner(138): callId: 31 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35424 deadline: 1734371971735, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f14d455ff3b60546f0a651dc8cf12d5c, server=3609ad07831c,39733,1734371789085 2024-12-16T17:58:31,741 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39733 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f14d455ff3b60546f0a651dc8cf12d5c, server=3609ad07831c,39733,1734371789085 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-16T17:58:31,741 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39733 {}] ipc.CallRunner(138): callId: 28 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35482 deadline: 1734371971735, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f14d455ff3b60546f0a651dc8cf12d5c, server=3609ad07831c,39733,1734371789085 2024-12-16T17:58:31,741 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39733 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f14d455ff3b60546f0a651dc8cf12d5c, server=3609ad07831c,39733,1734371789085 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-16T17:58:31,741 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39733 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f14d455ff3b60546f0a651dc8cf12d5c, server=3609ad07831c,39733,1734371789085 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-16T17:58:31,741 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39733 {}] ipc.CallRunner(138): callId: 30 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35480 deadline: 1734371971736, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f14d455ff3b60546f0a651dc8cf12d5c, server=3609ad07831c,39733,1734371789085 2024-12-16T17:58:31,741 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39733 {}] ipc.CallRunner(138): callId: 25 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35488 deadline: 1734371971736, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f14d455ff3b60546f0a651dc8cf12d5c, server=3609ad07831c,39733,1734371789085 2024-12-16T17:58:31,741 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39733 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f14d455ff3b60546f0a651dc8cf12d5c, server=3609ad07831c,39733,1734371789085 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-16T17:58:31,742 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39733 {}] ipc.CallRunner(138): callId: 27 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35486 deadline: 1734371971737, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f14d455ff3b60546f0a651dc8cf12d5c, server=3609ad07831c,39733,1734371789085 2024-12-16T17:58:31,944 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39733 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f14d455ff3b60546f0a651dc8cf12d5c, server=3609ad07831c,39733,1734371789085 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-16T17:58:31,945 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39733 {}] ipc.CallRunner(138): callId: 29 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35486 deadline: 1734371971942, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f14d455ff3b60546f0a651dc8cf12d5c, server=3609ad07831c,39733,1734371789085 2024-12-16T17:58:31,945 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39733 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f14d455ff3b60546f0a651dc8cf12d5c, server=3609ad07831c,39733,1734371789085 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-16T17:58:31,945 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39733 {}] ipc.CallRunner(138): callId: 32 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35480 deadline: 1734371971942, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f14d455ff3b60546f0a651dc8cf12d5c, server=3609ad07831c,39733,1734371789085 2024-12-16T17:58:31,945 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39733 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f14d455ff3b60546f0a651dc8cf12d5c, server=3609ad07831c,39733,1734371789085 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-16T17:58:31,945 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39733 {}] ipc.CallRunner(138): callId: 27 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35488 deadline: 1734371971943, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f14d455ff3b60546f0a651dc8cf12d5c, server=3609ad07831c,39733,1734371789085 2024-12-16T17:58:31,945 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39733 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f14d455ff3b60546f0a651dc8cf12d5c, server=3609ad07831c,39733,1734371789085 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-16T17:58:31,945 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39733 {}] ipc.CallRunner(138): callId: 33 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35424 deadline: 1734371971943, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f14d455ff3b60546f0a651dc8cf12d5c, server=3609ad07831c,39733,1734371789085 2024-12-16T17:58:31,945 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39733 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f14d455ff3b60546f0a651dc8cf12d5c, server=3609ad07831c,39733,1734371789085 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-16T17:58:31,945 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39733 {}] ipc.CallRunner(138): callId: 30 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35482 deadline: 1734371971943, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f14d455ff3b60546f0a651dc8cf12d5c, server=3609ad07831c,39733,1734371789085 2024-12-16T17:58:32,034 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=20.13 KB at sequenceid=50 (bloomFilter=true), to=hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/f14d455ff3b60546f0a651dc8cf12d5c/.tmp/C/5098bc816b524042a7de236ea05f4b2e 2024-12-16T17:58:32,038 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/f14d455ff3b60546f0a651dc8cf12d5c/.tmp/A/c3e8b2af92234f888643b88ca11e8a36 as hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/f14d455ff3b60546f0a651dc8cf12d5c/A/c3e8b2af92234f888643b88ca11e8a36 2024-12-16T17:58:32,040 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/f14d455ff3b60546f0a651dc8cf12d5c/A/c3e8b2af92234f888643b88ca11e8a36, entries=200, sequenceid=50, filesize=14.0 K 2024-12-16T17:58:32,041 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/f14d455ff3b60546f0a651dc8cf12d5c/.tmp/B/6f81c30f127d4c21a87aec9ce9e33636 as hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/f14d455ff3b60546f0a651dc8cf12d5c/B/6f81c30f127d4c21a87aec9ce9e33636 2024-12-16T17:58:32,044 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/f14d455ff3b60546f0a651dc8cf12d5c/B/6f81c30f127d4c21a87aec9ce9e33636, entries=150, sequenceid=50, filesize=11.7 K 2024-12-16T17:58:32,045 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/f14d455ff3b60546f0a651dc8cf12d5c/.tmp/C/5098bc816b524042a7de236ea05f4b2e as hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/f14d455ff3b60546f0a651dc8cf12d5c/C/5098bc816b524042a7de236ea05f4b2e 2024-12-16T17:58:32,047 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added 
hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/f14d455ff3b60546f0a651dc8cf12d5c/C/5098bc816b524042a7de236ea05f4b2e, entries=150, sequenceid=50, filesize=11.7 K 2024-12-16T17:58:32,048 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~60.38 KB/61830, heapSize ~158.91 KB/162720, currentSize=140.89 KB/144270 for f14d455ff3b60546f0a651dc8cf12d5c in 446ms, sequenceid=50, compaction requested=true 2024-12-16T17:58:32,048 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for f14d455ff3b60546f0a651dc8cf12d5c: 2024-12-16T17:58:32,048 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store f14d455ff3b60546f0a651dc8cf12d5c:A, priority=-2147483648, current under compaction store size is 1 2024-12-16T17:58:32,048 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-16T17:58:32,048 DEBUG [RS:0;3609ad07831c:39733-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-16T17:58:32,048 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store f14d455ff3b60546f0a651dc8cf12d5c:B, priority=-2147483648, current under compaction store size is 2 2024-12-16T17:58:32,048 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-16T17:58:32,048 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store f14d455ff3b60546f0a651dc8cf12d5c:C, priority=-2147483648, current under compaction store size is 3 2024-12-16T17:58:32,048 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-16T17:58:32,048 DEBUG [RS:0;3609ad07831c:39733-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-16T17:58:32,049 DEBUG [RS:0;3609ad07831c:39733-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 40683 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-16T17:58:32,049 DEBUG [RS:0;3609ad07831c:39733-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 36003 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-16T17:58:32,049 DEBUG [RS:0;3609ad07831c:39733-longCompactions-0 {}] regionserver.HStore(1540): f14d455ff3b60546f0a651dc8cf12d5c/B is initiating minor compaction (all files) 2024-12-16T17:58:32,049 DEBUG [RS:0;3609ad07831c:39733-shortCompactions-0 {}] regionserver.HStore(1540): f14d455ff3b60546f0a651dc8cf12d5c/A is initiating minor compaction (all files) 2024-12-16T17:58:32,049 INFO [RS:0;3609ad07831c:39733-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of f14d455ff3b60546f0a651dc8cf12d5c/B in TestAcidGuarantees,,1734371907068.f14d455ff3b60546f0a651dc8cf12d5c. 
2024-12-16T17:58:32,049 INFO [RS:0;3609ad07831c:39733-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of f14d455ff3b60546f0a651dc8cf12d5c/A in TestAcidGuarantees,,1734371907068.f14d455ff3b60546f0a651dc8cf12d5c. 2024-12-16T17:58:32,049 INFO [RS:0;3609ad07831c:39733-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/f14d455ff3b60546f0a651dc8cf12d5c/B/cf8991069ad04d12b36cfa5c2cf08c15, hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/f14d455ff3b60546f0a651dc8cf12d5c/B/9808bc9bd8024435b3b678911af5d4b0, hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/f14d455ff3b60546f0a651dc8cf12d5c/B/6f81c30f127d4c21a87aec9ce9e33636] into tmpdir=hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/f14d455ff3b60546f0a651dc8cf12d5c/.tmp, totalSize=35.2 K 2024-12-16T17:58:32,049 INFO [RS:0;3609ad07831c:39733-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/f14d455ff3b60546f0a651dc8cf12d5c/A/842193dbac8b42018e5fcf9924469536, hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/f14d455ff3b60546f0a651dc8cf12d5c/A/2b538a89aa2b4dfc988362f0e699d81f, hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/f14d455ff3b60546f0a651dc8cf12d5c/A/c3e8b2af92234f888643b88ca11e8a36] into tmpdir=hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/f14d455ff3b60546f0a651dc8cf12d5c/.tmp, totalSize=39.7 K 2024-12-16T17:58:32,050 DEBUG [RS:0;3609ad07831c:39733-shortCompactions-0 {}] compactions.Compactor(224): Compacting 842193dbac8b42018e5fcf9924469536, keycount=200, bloomtype=ROW, size=14.0 K, encoding=NONE, compression=NONE, seqNum=13, earliestPutTs=1734371909353 2024-12-16T17:58:32,050 DEBUG [RS:0;3609ad07831c:39733-longCompactions-0 {}] compactions.Compactor(224): Compacting cf8991069ad04d12b36cfa5c2cf08c15, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=13, earliestPutTs=1734371909353 2024-12-16T17:58:32,051 DEBUG [RS:0;3609ad07831c:39733-shortCompactions-0 {}] compactions.Compactor(224): Compacting 2b538a89aa2b4dfc988362f0e699d81f, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=38, earliestPutTs=1734371909366 2024-12-16T17:58:32,051 DEBUG [RS:0;3609ad07831c:39733-longCompactions-0 {}] compactions.Compactor(224): Compacting 9808bc9bd8024435b3b678911af5d4b0, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=38, earliestPutTs=1734371909366 2024-12-16T17:58:32,051 DEBUG [RS:0;3609ad07831c:39733-longCompactions-0 {}] compactions.Compactor(224): Compacting 6f81c30f127d4c21a87aec9ce9e33636, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=50, earliestPutTs=1734371911495 2024-12-16T17:58:32,051 DEBUG [RS:0;3609ad07831c:39733-shortCompactions-0 {}] compactions.Compactor(224): Compacting c3e8b2af92234f888643b88ca11e8a36, keycount=200, bloomtype=ROW, size=14.0 K, encoding=NONE, compression=NONE, seqNum=50, earliestPutTs=1734371911495 
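
The selection records above come from HBase's exploring compaction policy picking three store files per column family. As a rough illustration of the size-ratio idea behind such ratio-based selection (this is not the actual ExploringCompactionPolicy code, and the ratio value and helper names below are assumptions for illustration only), a candidate set can be checked so that no single file dwarfs the rest:

import java.util.List;

// Illustrative sketch only: a simplified size-ratio check in the spirit of
// ratio-based compaction selection; not the HBase ExploringCompactionPolicy
// implementation itself.
public class RatioSelectionSketch {

    /** Returns true if every file is at most `ratio` times the size of the other files combined. */
    static boolean withinRatio(List<Long> fileSizes, double ratio) {
        long total = 0L;
        for (long size : fileSizes) {
            total += size;
        }
        for (long size : fileSizes) {
            // A single oversized file would make the compaction poorly balanced.
            if (size > (total - size) * ratio) {
                return false;
            }
        }
        return true;
    }

    public static void main(String[] args) {
        // Sizes loosely modeled on the three ~12-14 KB store files selected above.
        List<Long> candidate = List.of(14336L, 11981L, 12001L);
        System.out.println("candidate within ratio 1.2: " + withinRatio(candidate, 1.2));
    }
}

A set that passes such a check compacts into one file of roughly the combined size, which matches the "3 files of size 40683 ... 1 in ratio" selections logged above.
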
2024-12-16T17:58:32,056 INFO [RS:0;3609ad07831c:39733-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): f14d455ff3b60546f0a651dc8cf12d5c#B#compaction#387 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 1 active operations remaining, total limit is 50.00 MB/second 2024-12-16T17:58:32,056 INFO [RS:0;3609ad07831c:39733-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): f14d455ff3b60546f0a651dc8cf12d5c#A#compaction#388 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-12-16T17:58:32,056 DEBUG [RS:0;3609ad07831c:39733-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/f14d455ff3b60546f0a651dc8cf12d5c/.tmp/B/6c35945ff87f4e058f7617e7088d9bfb is 50, key is test_row_0/B:col10/1734371911498/Put/seqid=0 2024-12-16T17:58:32,057 DEBUG [RS:0;3609ad07831c:39733-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/f14d455ff3b60546f0a651dc8cf12d5c/.tmp/A/23102372950e442f8c5ea323226c4c5c is 50, key is test_row_0/A:col10/1734371911498/Put/seqid=0 2024-12-16T17:58:32,068 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41817 is added to blk_1073742287_1463 (size=12104) 2024-12-16T17:58:32,068 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41817 is added to blk_1073742286_1462 (size=12104) 2024-12-16T17:58:32,072 DEBUG [RS:0;3609ad07831c:39733-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/f14d455ff3b60546f0a651dc8cf12d5c/.tmp/B/6c35945ff87f4e058f7617e7088d9bfb as hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/f14d455ff3b60546f0a651dc8cf12d5c/B/6c35945ff87f4e058f7617e7088d9bfb 2024-12-16T17:58:32,076 INFO [RS:0;3609ad07831c:39733-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in f14d455ff3b60546f0a651dc8cf12d5c/B of f14d455ff3b60546f0a651dc8cf12d5c into 6c35945ff87f4e058f7617e7088d9bfb(size=11.8 K), total size for store is 11.8 K. This selection was in queue for 0sec, and took 0sec to execute. 
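
The flush and compaction activity above is racing writers that keep being rejected with "Over memstore limit=512.0 K" throughout this log. In HBase that blocking threshold is the configured memstore flush size multiplied by the block multiplier. A minimal sketch of how a 512 K limit could arise is shown below; the concrete values (a 128 KB flush size and a multiplier of 4) are assumptions chosen to match the figure in the log, not values read from this test's source:

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;

// Illustrative sketch only: the two settings whose product forms the
// blocking threshold reported as "Over memstore limit=512.0 K".
public class MemstoreLimitSketch {
    public static void main(String[] args) {
        Configuration conf = HBaseConfiguration.create();
        // Assumed values: a 128 KB flush size and a block multiplier of 4
        // would yield the 512 KB blocking limit seen in this log.
        conf.setLong("hbase.hregion.memstore.flush.size", 128 * 1024L);
        conf.setInt("hbase.hregion.memstore.block.multiplier", 4);
        long blockingLimit = conf.getLong("hbase.hregion.memstore.flush.size", 0L)
            * conf.getInt("hbase.hregion.memstore.block.multiplier", 4);
        System.out.println("blocking limit (bytes): " + blockingLimit); // 524288 = 512 K
    }
}

Once the memstore for the region exceeds that product, checkResources throws RegionTooBusyException until a flush (like the ones logged here) brings the size back under the limit.
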
2024-12-16T17:58:32,076 DEBUG [RS:0;3609ad07831c:39733-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for f14d455ff3b60546f0a651dc8cf12d5c: 2024-12-16T17:58:32,076 INFO [RS:0;3609ad07831c:39733-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1734371907068.f14d455ff3b60546f0a651dc8cf12d5c., storeName=f14d455ff3b60546f0a651dc8cf12d5c/B, priority=13, startTime=1734371912048; duration=0sec 2024-12-16T17:58:32,076 DEBUG [RS:0;3609ad07831c:39733-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-16T17:58:32,076 DEBUG [RS:0;3609ad07831c:39733-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: f14d455ff3b60546f0a651dc8cf12d5c:B 2024-12-16T17:58:32,076 DEBUG [RS:0;3609ad07831c:39733-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-16T17:58:32,077 DEBUG [RS:0;3609ad07831c:39733-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 36003 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-16T17:58:32,077 DEBUG [RS:0;3609ad07831c:39733-longCompactions-0 {}] regionserver.HStore(1540): f14d455ff3b60546f0a651dc8cf12d5c/C is initiating minor compaction (all files) 2024-12-16T17:58:32,077 INFO [RS:0;3609ad07831c:39733-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of f14d455ff3b60546f0a651dc8cf12d5c/C in TestAcidGuarantees,,1734371907068.f14d455ff3b60546f0a651dc8cf12d5c. 2024-12-16T17:58:32,077 INFO [RS:0;3609ad07831c:39733-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/f14d455ff3b60546f0a651dc8cf12d5c/C/52a65fa7bce146b685e460f3821bcf78, hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/f14d455ff3b60546f0a651dc8cf12d5c/C/91c5d4a349f142c39f850ee27741357f, hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/f14d455ff3b60546f0a651dc8cf12d5c/C/5098bc816b524042a7de236ea05f4b2e] into tmpdir=hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/f14d455ff3b60546f0a651dc8cf12d5c/.tmp, totalSize=35.2 K 2024-12-16T17:58:32,077 DEBUG [RS:0;3609ad07831c:39733-longCompactions-0 {}] compactions.Compactor(224): Compacting 52a65fa7bce146b685e460f3821bcf78, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=13, earliestPutTs=1734371909353 2024-12-16T17:58:32,078 DEBUG [RS:0;3609ad07831c:39733-longCompactions-0 {}] compactions.Compactor(224): Compacting 91c5d4a349f142c39f850ee27741357f, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=38, earliestPutTs=1734371909366 2024-12-16T17:58:32,078 DEBUG [RS:0;3609ad07831c:39733-longCompactions-0 {}] compactions.Compactor(224): Compacting 5098bc816b524042a7de236ea05f4b2e, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=50, earliestPutTs=1734371911495 2024-12-16T17:58:32,084 INFO [RS:0;3609ad07831c:39733-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 
f14d455ff3b60546f0a651dc8cf12d5c#C#compaction#389 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-12-16T17:58:32,084 DEBUG [RS:0;3609ad07831c:39733-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/f14d455ff3b60546f0a651dc8cf12d5c/.tmp/C/f643e6aee8714504afdd7ab73d31e955 is 50, key is test_row_0/C:col10/1734371911498/Put/seqid=0 2024-12-16T17:58:32,087 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41817 is added to blk_1073742288_1464 (size=12104) 2024-12-16T17:58:32,249 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39733 {}] regionserver.HRegion(8581): Flush requested on f14d455ff3b60546f0a651dc8cf12d5c 2024-12-16T17:58:32,249 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing f14d455ff3b60546f0a651dc8cf12d5c 3/3 column families, dataSize=147.60 KB heapSize=387.47 KB 2024-12-16T17:58:32,250 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK f14d455ff3b60546f0a651dc8cf12d5c, store=A 2024-12-16T17:58:32,250 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-16T17:58:32,250 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK f14d455ff3b60546f0a651dc8cf12d5c, store=B 2024-12-16T17:58:32,250 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-16T17:58:32,250 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK f14d455ff3b60546f0a651dc8cf12d5c, store=C 2024-12-16T17:58:32,250 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-16T17:58:32,254 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/f14d455ff3b60546f0a651dc8cf12d5c/.tmp/A/db0b7eec8e4b45c48f2f3cea70858c67 is 50, key is test_row_0/A:col10/1734371912249/Put/seqid=0 2024-12-16T17:58:32,258 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41817 is added to blk_1073742289_1465 (size=12001) 2024-12-16T17:58:32,281 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39733 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f14d455ff3b60546f0a651dc8cf12d5c, server=3609ad07831c,39733,1734371789085 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-16T17:58:32,281 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39733 {}] ipc.CallRunner(138): callId: 34 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35482 deadline: 1734371972255, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f14d455ff3b60546f0a651dc8cf12d5c, server=3609ad07831c,39733,1734371789085 2024-12-16T17:58:32,281 WARN [HBase-Metrics2-1 {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-hbase.properties,hadoop-metrics2.properties 2024-12-16T17:58:32,285 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39733 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f14d455ff3b60546f0a651dc8cf12d5c, server=3609ad07831c,39733,1734371789085 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-16T17:58:32,285 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39733 {}] ipc.CallRunner(138): callId: 33 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35486 deadline: 1734371972256, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f14d455ff3b60546f0a651dc8cf12d5c, server=3609ad07831c,39733,1734371789085 2024-12-16T17:58:32,288 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39733 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f14d455ff3b60546f0a651dc8cf12d5c, server=3609ad07831c,39733,1734371789085 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-16T17:58:32,288 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39733 {}] ipc.CallRunner(138): callId: 37 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35424 deadline: 1734371972281, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f14d455ff3b60546f0a651dc8cf12d5c, server=3609ad07831c,39733,1734371789085 2024-12-16T17:58:32,288 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39733 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f14d455ff3b60546f0a651dc8cf12d5c, server=3609ad07831c,39733,1734371789085 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-16T17:58:32,289 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39733 {}] ipc.CallRunner(138): callId: 32 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35488 deadline: 1734371972281, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f14d455ff3b60546f0a651dc8cf12d5c, server=3609ad07831c,39733,1734371789085 2024-12-16T17:58:32,289 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39733 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f14d455ff3b60546f0a651dc8cf12d5c, server=3609ad07831c,39733,1734371789085 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-16T17:58:32,289 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39733 {}] ipc.CallRunner(138): callId: 36 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35480 deadline: 1734371972281, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f14d455ff3b60546f0a651dc8cf12d5c, server=3609ad07831c,39733,1734371789085 2024-12-16T17:58:32,384 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39733 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f14d455ff3b60546f0a651dc8cf12d5c, server=3609ad07831c,39733,1734371789085 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-16T17:58:32,384 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39733 {}] ipc.CallRunner(138): callId: 36 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35482 deadline: 1734371972382, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f14d455ff3b60546f0a651dc8cf12d5c, server=3609ad07831c,39733,1734371789085 2024-12-16T17:58:32,389 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39733 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f14d455ff3b60546f0a651dc8cf12d5c, server=3609ad07831c,39733,1734371789085 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-16T17:58:32,389 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39733 {}] ipc.CallRunner(138): callId: 35 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35486 deadline: 1734371972386, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f14d455ff3b60546f0a651dc8cf12d5c, server=3609ad07831c,39733,1734371789085 2024-12-16T17:58:32,392 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39733 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f14d455ff3b60546f0a651dc8cf12d5c, server=3609ad07831c,39733,1734371789085 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-16T17:58:32,392 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39733 {}] ipc.CallRunner(138): callId: 39 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35424 deadline: 1734371972389, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f14d455ff3b60546f0a651dc8cf12d5c, server=3609ad07831c,39733,1734371789085 2024-12-16T17:58:32,392 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39733 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f14d455ff3b60546f0a651dc8cf12d5c, server=3609ad07831c,39733,1734371789085 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-16T17:58:32,392 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39733 {}] ipc.CallRunner(138): callId: 34 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35488 deadline: 1734371972389, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f14d455ff3b60546f0a651dc8cf12d5c, server=3609ad07831c,39733,1734371789085 2024-12-16T17:58:32,392 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39733 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f14d455ff3b60546f0a651dc8cf12d5c, server=3609ad07831c,39733,1734371789085 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-16T17:58:32,392 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39733 {}] ipc.CallRunner(138): callId: 38 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35480 deadline: 1734371972390, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f14d455ff3b60546f0a651dc8cf12d5c, server=3609ad07831c,39733,1734371789085 2024-12-16T17:58:32,472 DEBUG [RS:0;3609ad07831c:39733-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/f14d455ff3b60546f0a651dc8cf12d5c/.tmp/A/23102372950e442f8c5ea323226c4c5c as hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/f14d455ff3b60546f0a651dc8cf12d5c/A/23102372950e442f8c5ea323226c4c5c 2024-12-16T17:58:32,475 INFO [RS:0;3609ad07831c:39733-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in f14d455ff3b60546f0a651dc8cf12d5c/A of f14d455ff3b60546f0a651dc8cf12d5c into 23102372950e442f8c5ea323226c4c5c(size=11.8 K), total size for store is 11.8 K. This selection was in queue for 0sec, and took 0sec to execute. 
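
The rejected Mutate calls recorded above (callId entries with RegionTooBusyException) are normally absorbed by the HBase client's built-in retry logic rather than handled by application code. The sketch below makes that backoff explicit for a single Put against the TestAcidGuarantees table; the column family, value, retry budget, and sleep interval are assumptions for illustration:

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.RegionTooBusyException;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.util.Bytes;

// Illustrative sketch only: an explicit retry loop around a Put that may be
// rejected with RegionTooBusyException while the region's memstore is over
// its blocking limit. The client library usually retries this internally.
public class BusyRegionPutSketch {
    public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();
        try (Connection connection = ConnectionFactory.createConnection(conf);
             Table table = connection.getTable(TableName.valueOf("TestAcidGuarantees"))) {
            Put put = new Put(Bytes.toBytes("test_row_0"));
            put.addColumn(Bytes.toBytes("A"), Bytes.toBytes("col10"), Bytes.toBytes("value"));
            int attempts = 5;          // assumed retry budget
            long backoffMillis = 200L; // assumed starting backoff
            for (int i = 0; i < attempts; i++) {
                try {
                    table.put(put);
                    break;             // write accepted
                } catch (RegionTooBusyException busy) {
                    // The region is blocking writes until a flush frees memstore space.
                    Thread.sleep(backoffMillis);
                    backoffMillis *= 2; // simple exponential backoff
                }
            }
        }
    }
}

Backing off on this exception gives the flusher and compactor threads seen above time to drain the memstore, after which the writes are accepted.
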
2024-12-16T17:58:32,475 DEBUG [RS:0;3609ad07831c:39733-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for f14d455ff3b60546f0a651dc8cf12d5c: 2024-12-16T17:58:32,475 INFO [RS:0;3609ad07831c:39733-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1734371907068.f14d455ff3b60546f0a651dc8cf12d5c., storeName=f14d455ff3b60546f0a651dc8cf12d5c/A, priority=13, startTime=1734371912048; duration=0sec 2024-12-16T17:58:32,475 DEBUG [RS:0;3609ad07831c:39733-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-16T17:58:32,475 DEBUG [RS:0;3609ad07831c:39733-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: f14d455ff3b60546f0a651dc8cf12d5c:A 2024-12-16T17:58:32,490 DEBUG [RS:0;3609ad07831c:39733-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/f14d455ff3b60546f0a651dc8cf12d5c/.tmp/C/f643e6aee8714504afdd7ab73d31e955 as hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/f14d455ff3b60546f0a651dc8cf12d5c/C/f643e6aee8714504afdd7ab73d31e955 2024-12-16T17:58:32,493 INFO [RS:0;3609ad07831c:39733-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in f14d455ff3b60546f0a651dc8cf12d5c/C of f14d455ff3b60546f0a651dc8cf12d5c into f643e6aee8714504afdd7ab73d31e955(size=11.8 K), total size for store is 11.8 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-12-16T17:58:32,493 DEBUG [RS:0;3609ad07831c:39733-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for f14d455ff3b60546f0a651dc8cf12d5c: 2024-12-16T17:58:32,493 INFO [RS:0;3609ad07831c:39733-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1734371907068.f14d455ff3b60546f0a651dc8cf12d5c., storeName=f14d455ff3b60546f0a651dc8cf12d5c/C, priority=13, startTime=1734371912048; duration=0sec 2024-12-16T17:58:32,494 DEBUG [RS:0;3609ad07831c:39733-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-16T17:58:32,494 DEBUG [RS:0;3609ad07831c:39733-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: f14d455ff3b60546f0a651dc8cf12d5c:C 2024-12-16T17:58:32,588 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39733 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f14d455ff3b60546f0a651dc8cf12d5c, server=3609ad07831c,39733,1734371789085 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-16T17:58:32,588 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39733 {}] ipc.CallRunner(138): callId: 38 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35482 deadline: 1734371972585, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f14d455ff3b60546f0a651dc8cf12d5c, server=3609ad07831c,39733,1734371789085 2024-12-16T17:58:32,594 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39733 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f14d455ff3b60546f0a651dc8cf12d5c, server=3609ad07831c,39733,1734371789085 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-16T17:58:32,594 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39733 {}] ipc.CallRunner(138): callId: 37 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35486 deadline: 1734371972591, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f14d455ff3b60546f0a651dc8cf12d5c, server=3609ad07831c,39733,1734371789085 2024-12-16T17:58:32,594 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39733 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f14d455ff3b60546f0a651dc8cf12d5c, server=3609ad07831c,39733,1734371789085 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-16T17:58:32,594 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39733 {}] ipc.CallRunner(138): callId: 41 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35424 deadline: 1734371972593, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f14d455ff3b60546f0a651dc8cf12d5c, server=3609ad07831c,39733,1734371789085 2024-12-16T17:58:32,594 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39733 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f14d455ff3b60546f0a651dc8cf12d5c, server=3609ad07831c,39733,1734371789085 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-16T17:58:32,595 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39733 {}] ipc.CallRunner(138): callId: 40 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35480 deadline: 1734371972593, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f14d455ff3b60546f0a651dc8cf12d5c, server=3609ad07831c,39733,1734371789085 2024-12-16T17:58:32,596 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39733 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f14d455ff3b60546f0a651dc8cf12d5c, server=3609ad07831c,39733,1734371789085 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-16T17:58:32,596 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39733 {}] ipc.CallRunner(138): callId: 36 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35488 deadline: 1734371972594, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f14d455ff3b60546f0a651dc8cf12d5c, server=3609ad07831c,39733,1734371789085 2024-12-16T17:58:32,658 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=53.67 KB at sequenceid=78 (bloomFilter=true), to=hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/f14d455ff3b60546f0a651dc8cf12d5c/.tmp/A/db0b7eec8e4b45c48f2f3cea70858c67 2024-12-16T17:58:32,663 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/f14d455ff3b60546f0a651dc8cf12d5c/.tmp/B/9f5312e09b894d17bf196375057a0edb is 50, key is test_row_0/B:col10/1734371912249/Put/seqid=0 2024-12-16T17:58:32,666 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41817 is added to blk_1073742290_1466 (size=12001) 2024-12-16T17:58:32,891 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39733 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f14d455ff3b60546f0a651dc8cf12d5c, server=3609ad07831c,39733,1734371789085 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-16T17:58:32,891 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39733 {}] ipc.CallRunner(138): callId: 40 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35482 deadline: 1734371972891, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f14d455ff3b60546f0a651dc8cf12d5c, server=3609ad07831c,39733,1734371789085 2024-12-16T17:58:32,896 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39733 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f14d455ff3b60546f0a651dc8cf12d5c, server=3609ad07831c,39733,1734371789085 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-16T17:58:32,897 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39733 {}] ipc.CallRunner(138): callId: 39 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35486 deadline: 1734371972895, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f14d455ff3b60546f0a651dc8cf12d5c, server=3609ad07831c,39733,1734371789085 2024-12-16T17:58:32,897 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39733 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f14d455ff3b60546f0a651dc8cf12d5c, server=3609ad07831c,39733,1734371789085 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-16T17:58:32,897 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39733 {}] ipc.CallRunner(138): callId: 42 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35480 deadline: 1734371972896, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f14d455ff3b60546f0a651dc8cf12d5c, server=3609ad07831c,39733,1734371789085 2024-12-16T17:58:32,897 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39733 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f14d455ff3b60546f0a651dc8cf12d5c, server=3609ad07831c,39733,1734371789085 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-16T17:58:32,898 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39733 {}] ipc.CallRunner(138): callId: 43 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35424 deadline: 1734371972897, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f14d455ff3b60546f0a651dc8cf12d5c, server=3609ad07831c,39733,1734371789085 2024-12-16T17:58:32,900 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39733 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f14d455ff3b60546f0a651dc8cf12d5c, server=3609ad07831c,39733,1734371789085 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-16T17:58:32,900 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39733 {}] ipc.CallRunner(138): callId: 38 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35488 deadline: 1734371972898, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f14d455ff3b60546f0a651dc8cf12d5c, server=3609ad07831c,39733,1734371789085 2024-12-16T17:58:33,067 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=53.67 KB at sequenceid=78 (bloomFilter=true), to=hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/f14d455ff3b60546f0a651dc8cf12d5c/.tmp/B/9f5312e09b894d17bf196375057a0edb 2024-12-16T17:58:33,073 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/f14d455ff3b60546f0a651dc8cf12d5c/.tmp/C/db4748bda3b44426b922300557d646b9 is 50, key is test_row_0/C:col10/1734371912249/Put/seqid=0 2024-12-16T17:58:33,079 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41817 is added to blk_1073742291_1467 (size=12001) 2024-12-16T17:58:33,399 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39733 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f14d455ff3b60546f0a651dc8cf12d5c, server=3609ad07831c,39733,1734371789085 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-16T17:58:33,399 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39733 {}] ipc.CallRunner(138): callId: 42 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35482 deadline: 1734371973395, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f14d455ff3b60546f0a651dc8cf12d5c, server=3609ad07831c,39733,1734371789085 2024-12-16T17:58:33,401 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39733 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f14d455ff3b60546f0a651dc8cf12d5c, server=3609ad07831c,39733,1734371789085 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-16T17:58:33,401 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39733 {}] ipc.CallRunner(138): callId: 45 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35424 deadline: 1734371973399, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f14d455ff3b60546f0a651dc8cf12d5c, server=3609ad07831c,39733,1734371789085 2024-12-16T17:58:33,404 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39733 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f14d455ff3b60546f0a651dc8cf12d5c, server=3609ad07831c,39733,1734371789085 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-16T17:58:33,404 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39733 {}] ipc.CallRunner(138): callId: 41 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35486 deadline: 1734371973400, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f14d455ff3b60546f0a651dc8cf12d5c, server=3609ad07831c,39733,1734371789085 2024-12-16T17:58:33,405 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39733 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f14d455ff3b60546f0a651dc8cf12d5c, server=3609ad07831c,39733,1734371789085 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-16T17:58:33,405 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39733 {}] ipc.CallRunner(138): callId: 44 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35480 deadline: 1734371973401, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f14d455ff3b60546f0a651dc8cf12d5c, server=3609ad07831c,39733,1734371789085 2024-12-16T17:58:33,405 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39733 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f14d455ff3b60546f0a651dc8cf12d5c, server=3609ad07831c,39733,1734371789085 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-16T17:58:33,405 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39733 {}] ipc.CallRunner(138): callId: 40 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35488 deadline: 1734371973403, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f14d455ff3b60546f0a651dc8cf12d5c, server=3609ad07831c,39733,1734371789085 2024-12-16T17:58:33,452 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38367 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=118 2024-12-16T17:58:33,452 INFO [Thread-2023 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 118 completed 2024-12-16T17:58:33,453 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38367 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-12-16T17:58:33,454 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38367 {}] procedure2.ProcedureExecutor(1098): Stored pid=120, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=120, table=TestAcidGuarantees 2024-12-16T17:58:33,454 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38367 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=120 2024-12-16T17:58:33,454 INFO [PEWorker-5 {}] procedure.FlushTableProcedure(91): pid=120, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=120, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-12-16T17:58:33,455 INFO [PEWorker-5 {}] procedure.FlushTableProcedure(91): pid=120, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=120, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-12-16T17:58:33,455 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=121, ppid=120, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-12-16T17:58:33,480 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=53.67 KB at 
sequenceid=78 (bloomFilter=true), to=hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/f14d455ff3b60546f0a651dc8cf12d5c/.tmp/C/db4748bda3b44426b922300557d646b9 2024-12-16T17:58:33,483 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/f14d455ff3b60546f0a651dc8cf12d5c/.tmp/A/db0b7eec8e4b45c48f2f3cea70858c67 as hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/f14d455ff3b60546f0a651dc8cf12d5c/A/db0b7eec8e4b45c48f2f3cea70858c67 2024-12-16T17:58:33,486 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/f14d455ff3b60546f0a651dc8cf12d5c/A/db0b7eec8e4b45c48f2f3cea70858c67, entries=150, sequenceid=78, filesize=11.7 K 2024-12-16T17:58:33,486 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/f14d455ff3b60546f0a651dc8cf12d5c/.tmp/B/9f5312e09b894d17bf196375057a0edb as hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/f14d455ff3b60546f0a651dc8cf12d5c/B/9f5312e09b894d17bf196375057a0edb 2024-12-16T17:58:33,489 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/f14d455ff3b60546f0a651dc8cf12d5c/B/9f5312e09b894d17bf196375057a0edb, entries=150, sequenceid=78, filesize=11.7 K 2024-12-16T17:58:33,489 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/f14d455ff3b60546f0a651dc8cf12d5c/.tmp/C/db4748bda3b44426b922300557d646b9 as hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/f14d455ff3b60546f0a651dc8cf12d5c/C/db4748bda3b44426b922300557d646b9 2024-12-16T17:58:33,492 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/f14d455ff3b60546f0a651dc8cf12d5c/C/db4748bda3b44426b922300557d646b9, entries=150, sequenceid=78, filesize=11.7 K 2024-12-16T17:58:33,492 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~161.02 KB/164880, heapSize ~422.58 KB/432720, currentSize=53.67 KB/54960 for f14d455ff3b60546f0a651dc8cf12d5c in 1243ms, sequenceid=78, compaction requested=false 2024-12-16T17:58:33,492 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for f14d455ff3b60546f0a651dc8cf12d5c: 2024-12-16T17:58:33,555 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38367 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=120 2024-12-16T17:58:33,606 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 3609ad07831c,39733,1734371789085 2024-12-16T17:58:33,606 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=39733 {}] regionserver.RSRpcServices(3992): Executing remote procedure class 
org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=121 2024-12-16T17:58:33,606 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-0 {event_type=RS_FLUSH_REGIONS, pid=121}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1734371907068.f14d455ff3b60546f0a651dc8cf12d5c. 2024-12-16T17:58:33,606 INFO [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-0 {event_type=RS_FLUSH_REGIONS, pid=121}] regionserver.HRegion(2837): Flushing f14d455ff3b60546f0a651dc8cf12d5c 3/3 column families, dataSize=53.67 KB heapSize=141.38 KB 2024-12-16T17:58:33,607 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-0 {event_type=RS_FLUSH_REGIONS, pid=121}] regionserver.CompactingMemStore(205): FLUSHING TO DISK f14d455ff3b60546f0a651dc8cf12d5c, store=A 2024-12-16T17:58:33,607 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-0 {event_type=RS_FLUSH_REGIONS, pid=121}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-16T17:58:33,607 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-0 {event_type=RS_FLUSH_REGIONS, pid=121}] regionserver.CompactingMemStore(205): FLUSHING TO DISK f14d455ff3b60546f0a651dc8cf12d5c, store=B 2024-12-16T17:58:33,607 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-0 {event_type=RS_FLUSH_REGIONS, pid=121}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-16T17:58:33,607 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-0 {event_type=RS_FLUSH_REGIONS, pid=121}] regionserver.CompactingMemStore(205): FLUSHING TO DISK f14d455ff3b60546f0a651dc8cf12d5c, store=C 2024-12-16T17:58:33,607 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-0 {event_type=RS_FLUSH_REGIONS, pid=121}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-16T17:58:33,610 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-0 {event_type=RS_FLUSH_REGIONS, pid=121}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/f14d455ff3b60546f0a651dc8cf12d5c/.tmp/A/8e7da218410644a3b71ac4378cd45751 is 50, key is test_row_0/A:col10/1734371912261/Put/seqid=0 2024-12-16T17:58:33,613 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41817 is added to blk_1073742292_1468 (size=12001) 2024-12-16T17:58:33,755 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38367 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=120 2024-12-16T17:58:34,013 INFO [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-0 {event_type=RS_FLUSH_REGIONS, pid=121}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=17.89 KB at sequenceid=91 (bloomFilter=true), to=hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/f14d455ff3b60546f0a651dc8cf12d5c/.tmp/A/8e7da218410644a3b71ac4378cd45751 2024-12-16T17:58:34,019 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-0 {event_type=RS_FLUSH_REGIONS, pid=121}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/f14d455ff3b60546f0a651dc8cf12d5c/.tmp/B/e328a284fdce4a9f85219a6d87165bbe is 50, key is test_row_0/B:col10/1734371912261/Put/seqid=0 2024-12-16T17:58:34,022 INFO [Block report processor {}] 
blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41817 is added to blk_1073742293_1469 (size=12001) 2024-12-16T17:58:34,022 INFO [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-0 {event_type=RS_FLUSH_REGIONS, pid=121}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=17.89 KB at sequenceid=91 (bloomFilter=true), to=hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/f14d455ff3b60546f0a651dc8cf12d5c/.tmp/B/e328a284fdce4a9f85219a6d87165bbe 2024-12-16T17:58:34,034 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-0 {event_type=RS_FLUSH_REGIONS, pid=121}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/f14d455ff3b60546f0a651dc8cf12d5c/.tmp/C/ada8348084074b93a2f92f8270e1dcbe is 50, key is test_row_0/C:col10/1734371912261/Put/seqid=0 2024-12-16T17:58:34,037 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41817 is added to blk_1073742294_1470 (size=12001) 2024-12-16T17:58:34,038 INFO [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-0 {event_type=RS_FLUSH_REGIONS, pid=121}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=17.89 KB at sequenceid=91 (bloomFilter=true), to=hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/f14d455ff3b60546f0a651dc8cf12d5c/.tmp/C/ada8348084074b93a2f92f8270e1dcbe 2024-12-16T17:58:34,042 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-0 {event_type=RS_FLUSH_REGIONS, pid=121}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/f14d455ff3b60546f0a651dc8cf12d5c/.tmp/A/8e7da218410644a3b71ac4378cd45751 as hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/f14d455ff3b60546f0a651dc8cf12d5c/A/8e7da218410644a3b71ac4378cd45751 2024-12-16T17:58:34,045 INFO [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-0 {event_type=RS_FLUSH_REGIONS, pid=121}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/f14d455ff3b60546f0a651dc8cf12d5c/A/8e7da218410644a3b71ac4378cd45751, entries=150, sequenceid=91, filesize=11.7 K 2024-12-16T17:58:34,046 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-0 {event_type=RS_FLUSH_REGIONS, pid=121}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/f14d455ff3b60546f0a651dc8cf12d5c/.tmp/B/e328a284fdce4a9f85219a6d87165bbe as hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/f14d455ff3b60546f0a651dc8cf12d5c/B/e328a284fdce4a9f85219a6d87165bbe 2024-12-16T17:58:34,048 INFO [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-0 {event_type=RS_FLUSH_REGIONS, pid=121}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/f14d455ff3b60546f0a651dc8cf12d5c/B/e328a284fdce4a9f85219a6d87165bbe, entries=150, sequenceid=91, filesize=11.7 K 2024-12-16T17:58:34,049 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-0 
{event_type=RS_FLUSH_REGIONS, pid=121}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/f14d455ff3b60546f0a651dc8cf12d5c/.tmp/C/ada8348084074b93a2f92f8270e1dcbe as hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/f14d455ff3b60546f0a651dc8cf12d5c/C/ada8348084074b93a2f92f8270e1dcbe 2024-12-16T17:58:34,053 INFO [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-0 {event_type=RS_FLUSH_REGIONS, pid=121}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/f14d455ff3b60546f0a651dc8cf12d5c/C/ada8348084074b93a2f92f8270e1dcbe, entries=150, sequenceid=91, filesize=11.7 K 2024-12-16T17:58:34,054 INFO [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-0 {event_type=RS_FLUSH_REGIONS, pid=121}] regionserver.HRegion(3040): Finished flush of dataSize ~53.67 KB/54960, heapSize ~141.33 KB/144720, currentSize=0 B/0 for f14d455ff3b60546f0a651dc8cf12d5c in 448ms, sequenceid=91, compaction requested=true 2024-12-16T17:58:34,054 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-0 {event_type=RS_FLUSH_REGIONS, pid=121}] regionserver.HRegion(2538): Flush status journal for f14d455ff3b60546f0a651dc8cf12d5c: 2024-12-16T17:58:34,054 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-0 {event_type=RS_FLUSH_REGIONS, pid=121}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1734371907068.f14d455ff3b60546f0a651dc8cf12d5c. 2024-12-16T17:58:34,054 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-0 {event_type=RS_FLUSH_REGIONS, pid=121}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=121 2024-12-16T17:58:34,054 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38367 {}] master.HMaster(4106): Remote procedure done, pid=121 2024-12-16T17:58:34,056 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38367 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=120 2024-12-16T17:58:34,056 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=121, resume processing ppid=120 2024-12-16T17:58:34,056 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=121, ppid=120, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 600 msec 2024-12-16T17:58:34,057 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=120, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=120, table=TestAcidGuarantees in 603 msec 2024-12-16T17:58:34,415 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39733 {}] regionserver.HRegion(8581): Flush requested on f14d455ff3b60546f0a651dc8cf12d5c 2024-12-16T17:58:34,415 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing f14d455ff3b60546f0a651dc8cf12d5c 3/3 column families, dataSize=53.67 KB heapSize=141.38 KB 2024-12-16T17:58:34,416 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK f14d455ff3b60546f0a651dc8cf12d5c, store=A 2024-12-16T17:58:34,416 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-16T17:58:34,416 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK f14d455ff3b60546f0a651dc8cf12d5c, store=B 2024-12-16T17:58:34,416 
DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-16T17:58:34,416 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK f14d455ff3b60546f0a651dc8cf12d5c, store=C 2024-12-16T17:58:34,416 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-16T17:58:34,420 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/f14d455ff3b60546f0a651dc8cf12d5c/.tmp/A/12999b5a21b94f258fe9c5e0a92a57c1 is 50, key is test_row_0/A:col10/1734371914411/Put/seqid=0 2024-12-16T17:58:34,430 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41817 is added to blk_1073742295_1471 (size=19021) 2024-12-16T17:58:34,430 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=22.36 KB at sequenceid=104 (bloomFilter=true), to=hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/f14d455ff3b60546f0a651dc8cf12d5c/.tmp/A/12999b5a21b94f258fe9c5e0a92a57c1 2024-12-16T17:58:34,438 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/f14d455ff3b60546f0a651dc8cf12d5c/.tmp/B/d6c6e52a8cac4df78be947ad31f240a7 is 50, key is test_row_0/B:col10/1734371914411/Put/seqid=0 2024-12-16T17:58:34,441 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41817 is added to blk_1073742296_1472 (size=12001) 2024-12-16T17:58:34,442 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=22.36 KB at sequenceid=104 (bloomFilter=true), to=hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/f14d455ff3b60546f0a651dc8cf12d5c/.tmp/B/d6c6e52a8cac4df78be947ad31f240a7 2024-12-16T17:58:34,448 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/f14d455ff3b60546f0a651dc8cf12d5c/.tmp/C/b75c417e400c4050baa5182da49042dc is 50, key is test_row_0/C:col10/1734371914411/Put/seqid=0 2024-12-16T17:58:34,448 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39733 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f14d455ff3b60546f0a651dc8cf12d5c, server=3609ad07831c,39733,1734371789085 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-16T17:58:34,448 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39733 {}] ipc.CallRunner(138): callId: 49 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35486 deadline: 1734371974442, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f14d455ff3b60546f0a651dc8cf12d5c, server=3609ad07831c,39733,1734371789085 2024-12-16T17:58:34,449 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39733 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f14d455ff3b60546f0a651dc8cf12d5c, server=3609ad07831c,39733,1734371789085 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-16T17:58:34,449 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39733 {}] ipc.CallRunner(138): callId: 51 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35482 deadline: 1734371974442, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f14d455ff3b60546f0a651dc8cf12d5c, server=3609ad07831c,39733,1734371789085 2024-12-16T17:58:34,449 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39733 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f14d455ff3b60546f0a651dc8cf12d5c, server=3609ad07831c,39733,1734371789085 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-16T17:58:34,449 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39733 {}] ipc.CallRunner(138): callId: 52 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35424 deadline: 1734371974443, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f14d455ff3b60546f0a651dc8cf12d5c, server=3609ad07831c,39733,1734371789085 2024-12-16T17:58:34,449 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39733 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f14d455ff3b60546f0a651dc8cf12d5c, server=3609ad07831c,39733,1734371789085 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-16T17:58:34,449 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39733 {}] ipc.CallRunner(138): callId: 52 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35480 deadline: 1734371974443, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f14d455ff3b60546f0a651dc8cf12d5c, server=3609ad07831c,39733,1734371789085 2024-12-16T17:58:34,450 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39733 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f14d455ff3b60546f0a651dc8cf12d5c, server=3609ad07831c,39733,1734371789085 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-16T17:58:34,450 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39733 {}] ipc.CallRunner(138): callId: 48 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35488 deadline: 1734371974445, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f14d455ff3b60546f0a651dc8cf12d5c, server=3609ad07831c,39733,1734371789085 2024-12-16T17:58:34,458 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41817 is added to blk_1073742297_1473 (size=12001) 2024-12-16T17:58:34,459 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=22.36 KB at sequenceid=104 (bloomFilter=true), to=hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/f14d455ff3b60546f0a651dc8cf12d5c/.tmp/C/b75c417e400c4050baa5182da49042dc 2024-12-16T17:58:34,463 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/f14d455ff3b60546f0a651dc8cf12d5c/.tmp/A/12999b5a21b94f258fe9c5e0a92a57c1 as hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/f14d455ff3b60546f0a651dc8cf12d5c/A/12999b5a21b94f258fe9c5e0a92a57c1 2024-12-16T17:58:34,468 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/f14d455ff3b60546f0a651dc8cf12d5c/A/12999b5a21b94f258fe9c5e0a92a57c1, entries=300, sequenceid=104, filesize=18.6 K 2024-12-16T17:58:34,469 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/f14d455ff3b60546f0a651dc8cf12d5c/.tmp/B/d6c6e52a8cac4df78be947ad31f240a7 as hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/f14d455ff3b60546f0a651dc8cf12d5c/B/d6c6e52a8cac4df78be947ad31f240a7 2024-12-16T17:58:34,474 
INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/f14d455ff3b60546f0a651dc8cf12d5c/B/d6c6e52a8cac4df78be947ad31f240a7, entries=150, sequenceid=104, filesize=11.7 K 2024-12-16T17:58:34,475 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/f14d455ff3b60546f0a651dc8cf12d5c/.tmp/C/b75c417e400c4050baa5182da49042dc as hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/f14d455ff3b60546f0a651dc8cf12d5c/C/b75c417e400c4050baa5182da49042dc 2024-12-16T17:58:34,481 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/f14d455ff3b60546f0a651dc8cf12d5c/C/b75c417e400c4050baa5182da49042dc, entries=150, sequenceid=104, filesize=11.7 K 2024-12-16T17:58:34,482 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~67.09 KB/68700, heapSize ~176.48 KB/180720, currentSize=134.18 KB/137400 for f14d455ff3b60546f0a651dc8cf12d5c in 67ms, sequenceid=104, compaction requested=true 2024-12-16T17:58:34,482 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for f14d455ff3b60546f0a651dc8cf12d5c: 2024-12-16T17:58:34,482 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store f14d455ff3b60546f0a651dc8cf12d5c:A, priority=-2147483648, current under compaction store size is 1 2024-12-16T17:58:34,482 DEBUG [RS:0;3609ad07831c:39733-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 4 store files, 0 compacting, 4 eligible, 16 blocking 2024-12-16T17:58:34,482 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-16T17:58:34,482 DEBUG [RS:0;3609ad07831c:39733-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 4 store files, 0 compacting, 4 eligible, 16 blocking 2024-12-16T17:58:34,482 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store f14d455ff3b60546f0a651dc8cf12d5c:B, priority=-2147483648, current under compaction store size is 2 2024-12-16T17:58:34,482 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-16T17:58:34,482 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store f14d455ff3b60546f0a651dc8cf12d5c:C, priority=-2147483648, current under compaction store size is 3 2024-12-16T17:58:34,482 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-16T17:58:34,483 DEBUG [RS:0;3609ad07831c:39733-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 4 files of size 48107 starting at candidate #0 after considering 3 permutations with 3 in ratio 2024-12-16T17:58:34,483 DEBUG [RS:0;3609ad07831c:39733-longCompactions-0 {}] 
regionserver.HStore(1540): f14d455ff3b60546f0a651dc8cf12d5c/B is initiating minor compaction (all files) 2024-12-16T17:58:34,483 INFO [RS:0;3609ad07831c:39733-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of f14d455ff3b60546f0a651dc8cf12d5c/B in TestAcidGuarantees,,1734371907068.f14d455ff3b60546f0a651dc8cf12d5c. 2024-12-16T17:58:34,483 INFO [RS:0;3609ad07831c:39733-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/f14d455ff3b60546f0a651dc8cf12d5c/B/6c35945ff87f4e058f7617e7088d9bfb, hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/f14d455ff3b60546f0a651dc8cf12d5c/B/9f5312e09b894d17bf196375057a0edb, hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/f14d455ff3b60546f0a651dc8cf12d5c/B/e328a284fdce4a9f85219a6d87165bbe, hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/f14d455ff3b60546f0a651dc8cf12d5c/B/d6c6e52a8cac4df78be947ad31f240a7] into tmpdir=hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/f14d455ff3b60546f0a651dc8cf12d5c/.tmp, totalSize=47.0 K 2024-12-16T17:58:34,483 DEBUG [RS:0;3609ad07831c:39733-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 4 files of size 55127 starting at candidate #0 after considering 3 permutations with 3 in ratio 2024-12-16T17:58:34,483 DEBUG [RS:0;3609ad07831c:39733-shortCompactions-0 {}] regionserver.HStore(1540): f14d455ff3b60546f0a651dc8cf12d5c/A is initiating minor compaction (all files) 2024-12-16T17:58:34,484 INFO [RS:0;3609ad07831c:39733-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of f14d455ff3b60546f0a651dc8cf12d5c/A in TestAcidGuarantees,,1734371907068.f14d455ff3b60546f0a651dc8cf12d5c. 
2024-12-16T17:58:34,484 INFO [RS:0;3609ad07831c:39733-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/f14d455ff3b60546f0a651dc8cf12d5c/A/23102372950e442f8c5ea323226c4c5c, hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/f14d455ff3b60546f0a651dc8cf12d5c/A/db0b7eec8e4b45c48f2f3cea70858c67, hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/f14d455ff3b60546f0a651dc8cf12d5c/A/8e7da218410644a3b71ac4378cd45751, hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/f14d455ff3b60546f0a651dc8cf12d5c/A/12999b5a21b94f258fe9c5e0a92a57c1] into tmpdir=hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/f14d455ff3b60546f0a651dc8cf12d5c/.tmp, totalSize=53.8 K 2024-12-16T17:58:34,484 DEBUG [RS:0;3609ad07831c:39733-longCompactions-0 {}] compactions.Compactor(224): Compacting 6c35945ff87f4e058f7617e7088d9bfb, keycount=150, bloomtype=ROW, size=11.8 K, encoding=NONE, compression=NONE, seqNum=50, earliestPutTs=1734371911495 2024-12-16T17:58:34,484 DEBUG [RS:0;3609ad07831c:39733-shortCompactions-0 {}] compactions.Compactor(224): Compacting 23102372950e442f8c5ea323226c4c5c, keycount=150, bloomtype=ROW, size=11.8 K, encoding=NONE, compression=NONE, seqNum=50, earliestPutTs=1734371911495 2024-12-16T17:58:34,484 DEBUG [RS:0;3609ad07831c:39733-longCompactions-0 {}] compactions.Compactor(224): Compacting 9f5312e09b894d17bf196375057a0edb, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=78, earliestPutTs=1734371911625 2024-12-16T17:58:34,484 DEBUG [RS:0;3609ad07831c:39733-shortCompactions-0 {}] compactions.Compactor(224): Compacting db0b7eec8e4b45c48f2f3cea70858c67, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=78, earliestPutTs=1734371911625 2024-12-16T17:58:34,485 DEBUG [RS:0;3609ad07831c:39733-shortCompactions-0 {}] compactions.Compactor(224): Compacting 8e7da218410644a3b71ac4378cd45751, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=91, earliestPutTs=1734371912255 2024-12-16T17:58:34,485 DEBUG [RS:0;3609ad07831c:39733-longCompactions-0 {}] compactions.Compactor(224): Compacting e328a284fdce4a9f85219a6d87165bbe, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=91, earliestPutTs=1734371912255 2024-12-16T17:58:34,486 DEBUG [RS:0;3609ad07831c:39733-longCompactions-0 {}] compactions.Compactor(224): Compacting d6c6e52a8cac4df78be947ad31f240a7, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=104, earliestPutTs=1734371914411 2024-12-16T17:58:34,486 DEBUG [RS:0;3609ad07831c:39733-shortCompactions-0 {}] compactions.Compactor(224): Compacting 12999b5a21b94f258fe9c5e0a92a57c1, keycount=300, bloomtype=ROW, size=18.6 K, encoding=NONE, compression=NONE, seqNum=104, earliestPutTs=1734371914410 2024-12-16T17:58:34,499 INFO [RS:0;3609ad07831c:39733-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): f14d455ff3b60546f0a651dc8cf12d5c#A#compaction#400 average throughput is 1.09 MB/second, slept 0 time(s) and total slept time is 0 ms. 
1 active operations remaining, total limit is 50.00 MB/second 2024-12-16T17:58:34,499 INFO [RS:0;3609ad07831c:39733-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): f14d455ff3b60546f0a651dc8cf12d5c#B#compaction#399 average throughput is 1.09 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-12-16T17:58:34,499 DEBUG [RS:0;3609ad07831c:39733-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/f14d455ff3b60546f0a651dc8cf12d5c/.tmp/A/ef7c5e301f5f4f6c8d83bbd5c9e416b7 is 50, key is test_row_0/A:col10/1734371914411/Put/seqid=0 2024-12-16T17:58:34,499 DEBUG [RS:0;3609ad07831c:39733-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/f14d455ff3b60546f0a651dc8cf12d5c/.tmp/B/389c1f1159c54c96827698c05c99ef58 is 50, key is test_row_0/B:col10/1734371914411/Put/seqid=0 2024-12-16T17:58:34,509 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41817 is added to blk_1073742298_1474 (size=12241) 2024-12-16T17:58:34,513 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41817 is added to blk_1073742299_1475 (size=12241) 2024-12-16T17:58:34,553 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39733 {}] regionserver.HRegion(8581): Flush requested on f14d455ff3b60546f0a651dc8cf12d5c 2024-12-16T17:58:34,553 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing f14d455ff3b60546f0a651dc8cf12d5c 3/3 column families, dataSize=140.89 KB heapSize=369.89 KB 2024-12-16T17:58:34,553 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK f14d455ff3b60546f0a651dc8cf12d5c, store=A 2024-12-16T17:58:34,554 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-16T17:58:34,554 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK f14d455ff3b60546f0a651dc8cf12d5c, store=B 2024-12-16T17:58:34,554 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-16T17:58:34,554 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK f14d455ff3b60546f0a651dc8cf12d5c, store=C 2024-12-16T17:58:34,554 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-16T17:58:34,557 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38367 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=120 2024-12-16T17:58:34,557 INFO [Thread-2023 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 120 completed 2024-12-16T17:58:34,557 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/f14d455ff3b60546f0a651dc8cf12d5c/.tmp/A/6935df28d00e490aadba59790202db4a is 50, key is test_row_0/A:col10/1734371914552/Put/seqid=0 2024-12-16T17:58:34,558 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38367 {}] master.HMaster$22(4386): 
Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-12-16T17:58:34,558 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38367 {}] procedure2.ProcedureExecutor(1098): Stored pid=122, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=122, table=TestAcidGuarantees 2024-12-16T17:58:34,559 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38367 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=122 2024-12-16T17:58:34,559 INFO [PEWorker-2 {}] procedure.FlushTableProcedure(91): pid=122, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=122, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-12-16T17:58:34,560 INFO [PEWorker-2 {}] procedure.FlushTableProcedure(91): pid=122, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=122, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-12-16T17:58:34,560 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=123, ppid=122, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-12-16T17:58:34,561 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41817 is added to blk_1073742300_1476 (size=14341) 2024-12-16T17:58:34,561 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=46.96 KB at sequenceid=128 (bloomFilter=true), to=hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/f14d455ff3b60546f0a651dc8cf12d5c/.tmp/A/6935df28d00e490aadba59790202db4a 2024-12-16T17:58:34,569 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/f14d455ff3b60546f0a651dc8cf12d5c/.tmp/B/cc5b7c5119184ecea19d1f5cb70d4487 is 50, key is test_row_0/B:col10/1734371914552/Put/seqid=0 2024-12-16T17:58:34,569 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39733 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f14d455ff3b60546f0a651dc8cf12d5c, server=3609ad07831c,39733,1734371789085 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-16T17:58:34,569 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39733 {}] ipc.CallRunner(138): callId: 53 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35486 deadline: 1734371974563, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f14d455ff3b60546f0a651dc8cf12d5c, server=3609ad07831c,39733,1734371789085 2024-12-16T17:58:34,570 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39733 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f14d455ff3b60546f0a651dc8cf12d5c, server=3609ad07831c,39733,1734371789085 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-16T17:58:34,571 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39733 {}] ipc.CallRunner(138): callId: 55 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35482 deadline: 1734371974564, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f14d455ff3b60546f0a651dc8cf12d5c, server=3609ad07831c,39733,1734371789085 2024-12-16T17:58:34,572 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39733 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f14d455ff3b60546f0a651dc8cf12d5c, server=3609ad07831c,39733,1734371789085 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-16T17:58:34,573 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39733 {}] ipc.CallRunner(138): callId: 56 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35424 deadline: 1734371974565, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f14d455ff3b60546f0a651dc8cf12d5c, server=3609ad07831c,39733,1734371789085 2024-12-16T17:58:34,573 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39733 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f14d455ff3b60546f0a651dc8cf12d5c, server=3609ad07831c,39733,1734371789085 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-16T17:58:34,573 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39733 {}] ipc.CallRunner(138): callId: 52 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35488 deadline: 1734371974565, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f14d455ff3b60546f0a651dc8cf12d5c, server=3609ad07831c,39733,1734371789085 2024-12-16T17:58:34,573 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39733 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f14d455ff3b60546f0a651dc8cf12d5c, server=3609ad07831c,39733,1734371789085 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-16T17:58:34,573 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39733 {}] ipc.CallRunner(138): callId: 56 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35480 deadline: 1734371974565, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f14d455ff3b60546f0a651dc8cf12d5c, server=3609ad07831c,39733,1734371789085 2024-12-16T17:58:34,573 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41817 is added to blk_1073742301_1477 (size=12001) 2024-12-16T17:58:34,574 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=46.96 KB at sequenceid=128 (bloomFilter=true), to=hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/f14d455ff3b60546f0a651dc8cf12d5c/.tmp/B/cc5b7c5119184ecea19d1f5cb70d4487 2024-12-16T17:58:34,581 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/f14d455ff3b60546f0a651dc8cf12d5c/.tmp/C/7c896520f2194ecd9942c205fca8009b is 50, key is test_row_0/C:col10/1734371914552/Put/seqid=0 2024-12-16T17:58:34,584 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41817 is added to blk_1073742302_1478 (size=12001) 2024-12-16T17:58:34,660 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38367 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=122 2024-12-16T17:58:34,674 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39733 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f14d455ff3b60546f0a651dc8cf12d5c, server=3609ad07831c,39733,1734371789085 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-16T17:58:34,675 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39733 {}] ipc.CallRunner(138): callId: 55 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35486 deadline: 1734371974670, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f14d455ff3b60546f0a651dc8cf12d5c, server=3609ad07831c,39733,1734371789085 2024-12-16T17:58:34,675 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39733 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f14d455ff3b60546f0a651dc8cf12d5c, server=3609ad07831c,39733,1734371789085 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-16T17:58:34,675 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39733 {}] ipc.CallRunner(138): callId: 57 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35482 deadline: 1734371974671, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f14d455ff3b60546f0a651dc8cf12d5c, server=3609ad07831c,39733,1734371789085 2024-12-16T17:58:34,675 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39733 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f14d455ff3b60546f0a651dc8cf12d5c, server=3609ad07831c,39733,1734371789085 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-16T17:58:34,675 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39733 {}] ipc.CallRunner(138): callId: 58 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35424 deadline: 1734371974673, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f14d455ff3b60546f0a651dc8cf12d5c, server=3609ad07831c,39733,1734371789085 2024-12-16T17:58:34,675 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39733 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f14d455ff3b60546f0a651dc8cf12d5c, server=3609ad07831c,39733,1734371789085 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-16T17:58:34,675 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39733 {}] ipc.CallRunner(138): callId: 58 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35480 deadline: 1734371974674, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f14d455ff3b60546f0a651dc8cf12d5c, server=3609ad07831c,39733,1734371789085 2024-12-16T17:58:34,678 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39733 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f14d455ff3b60546f0a651dc8cf12d5c, server=3609ad07831c,39733,1734371789085 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-16T17:58:34,678 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39733 {}] ipc.CallRunner(138): callId: 54 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35488 deadline: 1734371974674, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f14d455ff3b60546f0a651dc8cf12d5c, server=3609ad07831c,39733,1734371789085 2024-12-16T17:58:34,712 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 3609ad07831c,39733,1734371789085 2024-12-16T17:58:34,712 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=39733 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=123 2024-12-16T17:58:34,712 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-1 {event_type=RS_FLUSH_REGIONS, pid=123}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1734371907068.f14d455ff3b60546f0a651dc8cf12d5c. 
2024-12-16T17:58:34,712 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-1 {event_type=RS_FLUSH_REGIONS, pid=123}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1734371907068.f14d455ff3b60546f0a651dc8cf12d5c. as already flushing 2024-12-16T17:58:34,712 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-1 {event_type=RS_FLUSH_REGIONS, pid=123}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1734371907068.f14d455ff3b60546f0a651dc8cf12d5c. 2024-12-16T17:58:34,712 ERROR [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-1 {event_type=RS_FLUSH_REGIONS, pid=123}] handler.RSProcedureHandler(58): pid=123 java.io.IOException: Unable to complete flush {ENCODED => f14d455ff3b60546f0a651dc8cf12d5c, NAME => 'TestAcidGuarantees,,1734371907068.f14d455ff3b60546f0a651dc8cf12d5c.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-16T17:58:34,713 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-1 {event_type=RS_FLUSH_REGIONS, pid=123}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=123 java.io.IOException: Unable to complete flush {ENCODED => f14d455ff3b60546f0a651dc8cf12d5c, NAME => 'TestAcidGuarantees,,1734371907068.f14d455ff3b60546f0a651dc8cf12d5c.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-16T17:58:34,713 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38367 {}] master.HMaster(4114): Remote procedure failed, pid=123 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => f14d455ff3b60546f0a651dc8cf12d5c, NAME => 'TestAcidGuarantees,,1734371907068.f14d455ff3b60546f0a651dc8cf12d5c.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => f14d455ff3b60546f0a651dc8cf12d5c, NAME => 'TestAcidGuarantees,,1734371907068.f14d455ff3b60546f0a651dc8cf12d5c.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-16T17:58:34,861 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38367 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=122 2024-12-16T17:58:34,864 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 3609ad07831c,39733,1734371789085 2024-12-16T17:58:34,865 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=39733 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=123 2024-12-16T17:58:34,865 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-2 {event_type=RS_FLUSH_REGIONS, pid=123}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1734371907068.f14d455ff3b60546f0a651dc8cf12d5c. 
2024-12-16T17:58:34,865 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-2 {event_type=RS_FLUSH_REGIONS, pid=123}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1734371907068.f14d455ff3b60546f0a651dc8cf12d5c. as already flushing 2024-12-16T17:58:34,865 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-2 {event_type=RS_FLUSH_REGIONS, pid=123}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1734371907068.f14d455ff3b60546f0a651dc8cf12d5c. 2024-12-16T17:58:34,865 ERROR [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-2 {event_type=RS_FLUSH_REGIONS, pid=123}] handler.RSProcedureHandler(58): pid=123 java.io.IOException: Unable to complete flush {ENCODED => f14d455ff3b60546f0a651dc8cf12d5c, NAME => 'TestAcidGuarantees,,1734371907068.f14d455ff3b60546f0a651dc8cf12d5c.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-16T17:58:34,865 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-2 {event_type=RS_FLUSH_REGIONS, pid=123}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=123 java.io.IOException: Unable to complete flush {ENCODED => f14d455ff3b60546f0a651dc8cf12d5c, NAME => 'TestAcidGuarantees,,1734371907068.f14d455ff3b60546f0a651dc8cf12d5c.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-16T17:58:34,865 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38367 {}] master.HMaster(4114): Remote procedure failed, pid=123 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => f14d455ff3b60546f0a651dc8cf12d5c, NAME => 'TestAcidGuarantees,,1734371907068.f14d455ff3b60546f0a651dc8cf12d5c.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => f14d455ff3b60546f0a651dc8cf12d5c, NAME => 'TestAcidGuarantees,,1734371907068.f14d455ff3b60546f0a651dc8cf12d5c.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-16T17:58:34,880 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39733 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f14d455ff3b60546f0a651dc8cf12d5c, server=3609ad07831c,39733,1734371789085 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-16T17:58:34,880 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39733 {}] ipc.CallRunner(138): callId: 60 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35424 deadline: 1734371974876, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f14d455ff3b60546f0a651dc8cf12d5c, server=3609ad07831c,39733,1734371789085 2024-12-16T17:58:34,880 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39733 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f14d455ff3b60546f0a651dc8cf12d5c, server=3609ad07831c,39733,1734371789085 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-16T17:58:34,880 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39733 {}] ipc.CallRunner(138): callId: 57 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35486 deadline: 1734371974876, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f14d455ff3b60546f0a651dc8cf12d5c, server=3609ad07831c,39733,1734371789085 2024-12-16T17:58:34,881 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39733 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f14d455ff3b60546f0a651dc8cf12d5c, server=3609ad07831c,39733,1734371789085 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-16T17:58:34,881 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39733 {}] ipc.CallRunner(138): callId: 59 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35482 deadline: 1734371974877, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f14d455ff3b60546f0a651dc8cf12d5c, server=3609ad07831c,39733,1734371789085 2024-12-16T17:58:34,881 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39733 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f14d455ff3b60546f0a651dc8cf12d5c, server=3609ad07831c,39733,1734371789085 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-16T17:58:34,881 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39733 {}] ipc.CallRunner(138): callId: 60 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35480 deadline: 1734371974877, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f14d455ff3b60546f0a651dc8cf12d5c, server=3609ad07831c,39733,1734371789085 2024-12-16T17:58:34,883 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39733 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f14d455ff3b60546f0a651dc8cf12d5c, server=3609ad07831c,39733,1734371789085 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-16T17:58:34,883 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39733 {}] ipc.CallRunner(138): callId: 56 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35488 deadline: 1734371974879, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f14d455ff3b60546f0a651dc8cf12d5c, server=3609ad07831c,39733,1734371789085 2024-12-16T17:58:34,912 DEBUG [RS:0;3609ad07831c:39733-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/f14d455ff3b60546f0a651dc8cf12d5c/.tmp/B/389c1f1159c54c96827698c05c99ef58 as hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/f14d455ff3b60546f0a651dc8cf12d5c/B/389c1f1159c54c96827698c05c99ef58 2024-12-16T17:58:34,916 INFO [RS:0;3609ad07831c:39733-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 4 (all) file(s) in f14d455ff3b60546f0a651dc8cf12d5c/B of f14d455ff3b60546f0a651dc8cf12d5c into 389c1f1159c54c96827698c05c99ef58(size=12.0 K), total size for store is 12.0 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-12-16T17:58:34,916 DEBUG [RS:0;3609ad07831c:39733-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for f14d455ff3b60546f0a651dc8cf12d5c: 2024-12-16T17:58:34,916 INFO [RS:0;3609ad07831c:39733-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1734371907068.f14d455ff3b60546f0a651dc8cf12d5c., storeName=f14d455ff3b60546f0a651dc8cf12d5c/B, priority=12, startTime=1734371914482; duration=0sec 2024-12-16T17:58:34,916 DEBUG [RS:0;3609ad07831c:39733-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-16T17:58:34,916 DEBUG [RS:0;3609ad07831c:39733-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: f14d455ff3b60546f0a651dc8cf12d5c:B 2024-12-16T17:58:34,916 DEBUG [RS:0;3609ad07831c:39733-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 4 store files, 0 compacting, 4 eligible, 16 blocking 2024-12-16T17:58:34,917 DEBUG [RS:0;3609ad07831c:39733-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/f14d455ff3b60546f0a651dc8cf12d5c/.tmp/A/ef7c5e301f5f4f6c8d83bbd5c9e416b7 as hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/f14d455ff3b60546f0a651dc8cf12d5c/A/ef7c5e301f5f4f6c8d83bbd5c9e416b7 2024-12-16T17:58:34,917 DEBUG [RS:0;3609ad07831c:39733-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 4 files of size 48107 starting at candidate #0 after considering 3 permutations with 3 in ratio 2024-12-16T17:58:34,917 DEBUG [RS:0;3609ad07831c:39733-longCompactions-0 {}] regionserver.HStore(1540): f14d455ff3b60546f0a651dc8cf12d5c/C is initiating minor compaction (all files) 2024-12-16T17:58:34,917 INFO [RS:0;3609ad07831c:39733-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of f14d455ff3b60546f0a651dc8cf12d5c/C in TestAcidGuarantees,,1734371907068.f14d455ff3b60546f0a651dc8cf12d5c. 
2024-12-16T17:58:34,917 INFO [RS:0;3609ad07831c:39733-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/f14d455ff3b60546f0a651dc8cf12d5c/C/f643e6aee8714504afdd7ab73d31e955, hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/f14d455ff3b60546f0a651dc8cf12d5c/C/db4748bda3b44426b922300557d646b9, hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/f14d455ff3b60546f0a651dc8cf12d5c/C/ada8348084074b93a2f92f8270e1dcbe, hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/f14d455ff3b60546f0a651dc8cf12d5c/C/b75c417e400c4050baa5182da49042dc] into tmpdir=hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/f14d455ff3b60546f0a651dc8cf12d5c/.tmp, totalSize=47.0 K 2024-12-16T17:58:34,917 DEBUG [RS:0;3609ad07831c:39733-longCompactions-0 {}] compactions.Compactor(224): Compacting f643e6aee8714504afdd7ab73d31e955, keycount=150, bloomtype=ROW, size=11.8 K, encoding=NONE, compression=NONE, seqNum=50, earliestPutTs=1734371911495 2024-12-16T17:58:34,918 DEBUG [RS:0;3609ad07831c:39733-longCompactions-0 {}] compactions.Compactor(224): Compacting db4748bda3b44426b922300557d646b9, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=78, earliestPutTs=1734371911625 2024-12-16T17:58:34,918 DEBUG [RS:0;3609ad07831c:39733-longCompactions-0 {}] compactions.Compactor(224): Compacting ada8348084074b93a2f92f8270e1dcbe, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=91, earliestPutTs=1734371912255 2024-12-16T17:58:34,918 DEBUG [RS:0;3609ad07831c:39733-longCompactions-0 {}] compactions.Compactor(224): Compacting b75c417e400c4050baa5182da49042dc, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=104, earliestPutTs=1734371914411 2024-12-16T17:58:34,920 INFO [RS:0;3609ad07831c:39733-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 4 (all) file(s) in f14d455ff3b60546f0a651dc8cf12d5c/A of f14d455ff3b60546f0a651dc8cf12d5c into ef7c5e301f5f4f6c8d83bbd5c9e416b7(size=12.0 K), total size for store is 12.0 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-12-16T17:58:34,920 DEBUG [RS:0;3609ad07831c:39733-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for f14d455ff3b60546f0a651dc8cf12d5c: 2024-12-16T17:58:34,920 INFO [RS:0;3609ad07831c:39733-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1734371907068.f14d455ff3b60546f0a651dc8cf12d5c., storeName=f14d455ff3b60546f0a651dc8cf12d5c/A, priority=12, startTime=1734371914482; duration=0sec 2024-12-16T17:58:34,920 DEBUG [RS:0;3609ad07831c:39733-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-16T17:58:34,920 DEBUG [RS:0;3609ad07831c:39733-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: f14d455ff3b60546f0a651dc8cf12d5c:A 2024-12-16T17:58:34,923 INFO [RS:0;3609ad07831c:39733-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): f14d455ff3b60546f0a651dc8cf12d5c#C#compaction#404 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-12-16T17:58:34,923 DEBUG [RS:0;3609ad07831c:39733-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/f14d455ff3b60546f0a651dc8cf12d5c/.tmp/C/6c076be5ba374c5f8e7f7df3e1cb4578 is 50, key is test_row_0/C:col10/1734371914411/Put/seqid=0 2024-12-16T17:58:34,930 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41817 is added to blk_1073742303_1479 (size=12241) 2024-12-16T17:58:34,985 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=46.96 KB at sequenceid=128 (bloomFilter=true), to=hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/f14d455ff3b60546f0a651dc8cf12d5c/.tmp/C/7c896520f2194ecd9942c205fca8009b 2024-12-16T17:58:34,988 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/f14d455ff3b60546f0a651dc8cf12d5c/.tmp/A/6935df28d00e490aadba59790202db4a as hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/f14d455ff3b60546f0a651dc8cf12d5c/A/6935df28d00e490aadba59790202db4a 2024-12-16T17:58:34,991 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/f14d455ff3b60546f0a651dc8cf12d5c/A/6935df28d00e490aadba59790202db4a, entries=200, sequenceid=128, filesize=14.0 K 2024-12-16T17:58:34,991 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/f14d455ff3b60546f0a651dc8cf12d5c/.tmp/B/cc5b7c5119184ecea19d1f5cb70d4487 as hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/f14d455ff3b60546f0a651dc8cf12d5c/B/cc5b7c5119184ecea19d1f5cb70d4487 2024-12-16T17:58:34,994 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added 
hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/f14d455ff3b60546f0a651dc8cf12d5c/B/cc5b7c5119184ecea19d1f5cb70d4487, entries=150, sequenceid=128, filesize=11.7 K 2024-12-16T17:58:34,994 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/f14d455ff3b60546f0a651dc8cf12d5c/.tmp/C/7c896520f2194ecd9942c205fca8009b as hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/f14d455ff3b60546f0a651dc8cf12d5c/C/7c896520f2194ecd9942c205fca8009b 2024-12-16T17:58:34,997 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/f14d455ff3b60546f0a651dc8cf12d5c/C/7c896520f2194ecd9942c205fca8009b, entries=150, sequenceid=128, filesize=11.7 K 2024-12-16T17:58:34,998 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~140.89 KB/144270, heapSize ~369.84 KB/378720, currentSize=60.38 KB/61830 for f14d455ff3b60546f0a651dc8cf12d5c in 445ms, sequenceid=128, compaction requested=false 2024-12-16T17:58:34,998 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for f14d455ff3b60546f0a651dc8cf12d5c: 2024-12-16T17:58:35,017 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 3609ad07831c,39733,1734371789085 2024-12-16T17:58:35,017 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=39733 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=123 2024-12-16T17:58:35,017 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-0 {event_type=RS_FLUSH_REGIONS, pid=123}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1734371907068.f14d455ff3b60546f0a651dc8cf12d5c. 
2024-12-16T17:58:35,017 INFO [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-0 {event_type=RS_FLUSH_REGIONS, pid=123}] regionserver.HRegion(2837): Flushing f14d455ff3b60546f0a651dc8cf12d5c 3/3 column families, dataSize=60.38 KB heapSize=158.95 KB 2024-12-16T17:58:35,017 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-0 {event_type=RS_FLUSH_REGIONS, pid=123}] regionserver.CompactingMemStore(205): FLUSHING TO DISK f14d455ff3b60546f0a651dc8cf12d5c, store=A 2024-12-16T17:58:35,017 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-0 {event_type=RS_FLUSH_REGIONS, pid=123}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-16T17:58:35,017 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-0 {event_type=RS_FLUSH_REGIONS, pid=123}] regionserver.CompactingMemStore(205): FLUSHING TO DISK f14d455ff3b60546f0a651dc8cf12d5c, store=B 2024-12-16T17:58:35,017 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-0 {event_type=RS_FLUSH_REGIONS, pid=123}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-16T17:58:35,017 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-0 {event_type=RS_FLUSH_REGIONS, pid=123}] regionserver.CompactingMemStore(205): FLUSHING TO DISK f14d455ff3b60546f0a651dc8cf12d5c, store=C 2024-12-16T17:58:35,018 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-0 {event_type=RS_FLUSH_REGIONS, pid=123}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-16T17:58:35,020 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-0 {event_type=RS_FLUSH_REGIONS, pid=123}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/f14d455ff3b60546f0a651dc8cf12d5c/.tmp/A/03a7b5816df64c94b9359fabcf889428 is 50, key is test_row_0/A:col10/1734371914564/Put/seqid=0 2024-12-16T17:58:35,023 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41817 is added to blk_1073742304_1480 (size=12151) 2024-12-16T17:58:35,161 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38367 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=122 2024-12-16T17:58:35,184 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39733 {}] regionserver.HRegion(8581): Flush requested on f14d455ff3b60546f0a651dc8cf12d5c 2024-12-16T17:58:35,185 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1734371907068.f14d455ff3b60546f0a651dc8cf12d5c. as already flushing 2024-12-16T17:58:35,217 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39733 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f14d455ff3b60546f0a651dc8cf12d5c, server=3609ad07831c,39733,1734371789085 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-16T17:58:35,217 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39733 {}] ipc.CallRunner(138): callId: 66 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35480 deadline: 1734371975207, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f14d455ff3b60546f0a651dc8cf12d5c, server=3609ad07831c,39733,1734371789085 2024-12-16T17:58:35,217 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39733 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f14d455ff3b60546f0a651dc8cf12d5c, server=3609ad07831c,39733,1734371789085 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-16T17:58:35,217 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39733 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f14d455ff3b60546f0a651dc8cf12d5c, server=3609ad07831c,39733,1734371789085 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-16T17:58:35,217 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39733 {}] ipc.CallRunner(138): callId: 62 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35488 deadline: 1734371975208, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f14d455ff3b60546f0a651dc8cf12d5c, server=3609ad07831c,39733,1734371789085 2024-12-16T17:58:35,218 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39733 {}] ipc.CallRunner(138): callId: 65 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35482 deadline: 1734371975208, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f14d455ff3b60546f0a651dc8cf12d5c, server=3609ad07831c,39733,1734371789085 2024-12-16T17:58:35,221 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39733 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f14d455ff3b60546f0a651dc8cf12d5c, server=3609ad07831c,39733,1734371789085 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-16T17:58:35,222 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39733 {}] ipc.CallRunner(138): callId: 67 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35424 deadline: 1734371975217, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f14d455ff3b60546f0a651dc8cf12d5c, server=3609ad07831c,39733,1734371789085 2024-12-16T17:58:35,226 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39733 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f14d455ff3b60546f0a651dc8cf12d5c, server=3609ad07831c,39733,1734371789085 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-16T17:58:35,226 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39733 {}] ipc.CallRunner(138): callId: 64 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35486 deadline: 1734371975217, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f14d455ff3b60546f0a651dc8cf12d5c, server=3609ad07831c,39733,1734371789085 2024-12-16T17:58:35,321 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39733 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f14d455ff3b60546f0a651dc8cf12d5c, server=3609ad07831c,39733,1734371789085 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-16T17:58:35,322 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39733 {}] ipc.CallRunner(138): callId: 64 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35488 deadline: 1734371975318, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f14d455ff3b60546f0a651dc8cf12d5c, server=3609ad07831c,39733,1734371789085 2024-12-16T17:58:35,322 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39733 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f14d455ff3b60546f0a651dc8cf12d5c, server=3609ad07831c,39733,1734371789085 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-16T17:58:35,322 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39733 {}] ipc.CallRunner(138): callId: 67 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35482 deadline: 1734371975318, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f14d455ff3b60546f0a651dc8cf12d5c, server=3609ad07831c,39733,1734371789085 2024-12-16T17:58:35,322 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39733 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f14d455ff3b60546f0a651dc8cf12d5c, server=3609ad07831c,39733,1734371789085 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-16T17:58:35,322 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39733 {}] ipc.CallRunner(138): callId: 68 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35480 deadline: 1734371975318, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f14d455ff3b60546f0a651dc8cf12d5c, server=3609ad07831c,39733,1734371789085 2024-12-16T17:58:35,323 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39733 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f14d455ff3b60546f0a651dc8cf12d5c, server=3609ad07831c,39733,1734371789085 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-16T17:58:35,323 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39733 {}] ipc.CallRunner(138): callId: 69 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35424 deadline: 1734371975322, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f14d455ff3b60546f0a651dc8cf12d5c, server=3609ad07831c,39733,1734371789085 2024-12-16T17:58:35,330 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39733 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f14d455ff3b60546f0a651dc8cf12d5c, server=3609ad07831c,39733,1734371789085 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-16T17:58:35,331 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39733 {}] ipc.CallRunner(138): callId: 66 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35486 deadline: 1734371975327, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f14d455ff3b60546f0a651dc8cf12d5c, server=3609ad07831c,39733,1734371789085 2024-12-16T17:58:35,334 DEBUG [RS:0;3609ad07831c:39733-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/f14d455ff3b60546f0a651dc8cf12d5c/.tmp/C/6c076be5ba374c5f8e7f7df3e1cb4578 as hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/f14d455ff3b60546f0a651dc8cf12d5c/C/6c076be5ba374c5f8e7f7df3e1cb4578 2024-12-16T17:58:35,337 INFO [RS:0;3609ad07831c:39733-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 4 (all) file(s) in f14d455ff3b60546f0a651dc8cf12d5c/C of f14d455ff3b60546f0a651dc8cf12d5c into 6c076be5ba374c5f8e7f7df3e1cb4578(size=12.0 K), total size for store is 23.7 K. This selection was in queue for 0sec, and took 0sec to execute. 
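[Editorial sketch, hedged] The repeated RegionTooBusyException entries above are thrown from HRegion.checkResources while the in-flight flush (pid=123) is still draining the region's memstore: once the region's memstore size passes its blocking limit, new Mutate calls are rejected until the flush completes, and the same client connections reappear with new callIds as they retry. The short Java sketch below is not part of the test source; it only illustrates how such a blocking limit is conventionally derived from configuration. The config keys named in the comments are real HBase settings, but the numeric values are assumptions chosen solely to reproduce the "Over memstore limit=512.0 K" figure seen in this log; the test's actual settings are not shown in this excerpt.

// Hedged illustration (not from the test source): deriving a 512 KB blocking
// memstore limit like the one reported in the log. The config keys in the
// comments exist in HBase; the numbers below are assumptions for this sketch.
public class MemStoreBlockingLimitSketch {
  public static void main(String[] args) {
    long assumedFlushSize = 128L * 1024;  // hbase.hregion.memstore.flush.size (assumed small for this test)
    long blockMultiplier = 4L;            // hbase.hregion.memstore.block.multiplier (4 is the usual default)
    long blockingLimit = assumedFlushSize * blockMultiplier; // 512 KB under these assumptions
    System.out.println("Puts would be rejected with RegionTooBusyException above "
        + blockingLimit + " bytes of memstore per region");
  }
}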
2024-12-16T17:58:35,337 DEBUG [RS:0;3609ad07831c:39733-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for f14d455ff3b60546f0a651dc8cf12d5c: 2024-12-16T17:58:35,337 INFO [RS:0;3609ad07831c:39733-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1734371907068.f14d455ff3b60546f0a651dc8cf12d5c., storeName=f14d455ff3b60546f0a651dc8cf12d5c/C, priority=12, startTime=1734371914482; duration=0sec 2024-12-16T17:58:35,337 DEBUG [RS:0;3609ad07831c:39733-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-16T17:58:35,337 DEBUG [RS:0;3609ad07831c:39733-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: f14d455ff3b60546f0a651dc8cf12d5c:C 2024-12-16T17:58:35,424 INFO [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-0 {event_type=RS_FLUSH_REGIONS, pid=123}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=20.13 KB at sequenceid=142 (bloomFilter=true), to=hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/f14d455ff3b60546f0a651dc8cf12d5c/.tmp/A/03a7b5816df64c94b9359fabcf889428 2024-12-16T17:58:35,429 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-0 {event_type=RS_FLUSH_REGIONS, pid=123}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/f14d455ff3b60546f0a651dc8cf12d5c/.tmp/B/e7c8c35209ed4199881958f1b9e0d8f7 is 50, key is test_row_0/B:col10/1734371914564/Put/seqid=0 2024-12-16T17:58:35,431 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41817 is added to blk_1073742305_1481 (size=12151) 2024-12-16T17:58:35,525 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39733 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f14d455ff3b60546f0a651dc8cf12d5c, server=3609ad07831c,39733,1734371789085 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-16T17:58:35,526 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39733 {}] ipc.CallRunner(138): callId: 66 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35488 deadline: 1734371975522, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f14d455ff3b60546f0a651dc8cf12d5c, server=3609ad07831c,39733,1734371789085 2024-12-16T17:58:35,526 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39733 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f14d455ff3b60546f0a651dc8cf12d5c, server=3609ad07831c,39733,1734371789085 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-16T17:58:35,526 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39733 {}] ipc.CallRunner(138): callId: 69 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35482 deadline: 1734371975523, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f14d455ff3b60546f0a651dc8cf12d5c, server=3609ad07831c,39733,1734371789085 2024-12-16T17:58:35,526 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39733 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f14d455ff3b60546f0a651dc8cf12d5c, server=3609ad07831c,39733,1734371789085 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-16T17:58:35,526 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39733 {}] ipc.CallRunner(138): callId: 70 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35480 deadline: 1734371975523, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f14d455ff3b60546f0a651dc8cf12d5c, server=3609ad07831c,39733,1734371789085 2024-12-16T17:58:35,526 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39733 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f14d455ff3b60546f0a651dc8cf12d5c, server=3609ad07831c,39733,1734371789085 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-16T17:58:35,526 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39733 {}] ipc.CallRunner(138): callId: 71 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35424 deadline: 1734371975525, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f14d455ff3b60546f0a651dc8cf12d5c, server=3609ad07831c,39733,1734371789085 2024-12-16T17:58:35,533 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39733 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f14d455ff3b60546f0a651dc8cf12d5c, server=3609ad07831c,39733,1734371789085 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-16T17:58:35,534 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39733 {}] ipc.CallRunner(138): callId: 68 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35486 deadline: 1734371975531, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f14d455ff3b60546f0a651dc8cf12d5c, server=3609ad07831c,39733,1734371789085 2024-12-16T17:58:35,662 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38367 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=122 2024-12-16T17:58:35,828 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39733 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f14d455ff3b60546f0a651dc8cf12d5c, server=3609ad07831c,39733,1734371789085 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-16T17:58:35,828 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39733 {}] ipc.CallRunner(138): callId: 68 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35488 deadline: 1734371975826, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f14d455ff3b60546f0a651dc8cf12d5c, server=3609ad07831c,39733,1734371789085 2024-12-16T17:58:35,828 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39733 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f14d455ff3b60546f0a651dc8cf12d5c, server=3609ad07831c,39733,1734371789085 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-16T17:58:35,829 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39733 {}] ipc.CallRunner(138): callId: 73 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35424 deadline: 1734371975827, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f14d455ff3b60546f0a651dc8cf12d5c, server=3609ad07831c,39733,1734371789085 2024-12-16T17:58:35,830 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39733 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f14d455ff3b60546f0a651dc8cf12d5c, server=3609ad07831c,39733,1734371789085 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-16T17:58:35,830 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39733 {}] ipc.CallRunner(138): callId: 72 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35480 deadline: 1734371975828, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f14d455ff3b60546f0a651dc8cf12d5c, server=3609ad07831c,39733,1734371789085 2024-12-16T17:58:35,830 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39733 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f14d455ff3b60546f0a651dc8cf12d5c, server=3609ad07831c,39733,1734371789085 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-16T17:58:35,830 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39733 {}] ipc.CallRunner(138): callId: 71 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35482 deadline: 1734371975829, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f14d455ff3b60546f0a651dc8cf12d5c, server=3609ad07831c,39733,1734371789085 2024-12-16T17:58:35,832 INFO [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-0 {event_type=RS_FLUSH_REGIONS, pid=123}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=20.13 KB at sequenceid=142 (bloomFilter=true), to=hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/f14d455ff3b60546f0a651dc8cf12d5c/.tmp/B/e7c8c35209ed4199881958f1b9e0d8f7 2024-12-16T17:58:35,838 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-0 {event_type=RS_FLUSH_REGIONS, pid=123}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/f14d455ff3b60546f0a651dc8cf12d5c/.tmp/C/e31958c7a9c946b295d968d09ae561c3 is 50, key is test_row_0/C:col10/1734371914564/Put/seqid=0 2024-12-16T17:58:35,839 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39733 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f14d455ff3b60546f0a651dc8cf12d5c, server=3609ad07831c,39733,1734371789085 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-16T17:58:35,839 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39733 {}] ipc.CallRunner(138): callId: 70 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35486 deadline: 1734371975836, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f14d455ff3b60546f0a651dc8cf12d5c, server=3609ad07831c,39733,1734371789085 2024-12-16T17:58:35,841 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41817 is added to blk_1073742306_1482 (size=12151) 2024-12-16T17:58:36,242 INFO [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-0 {event_type=RS_FLUSH_REGIONS, pid=123}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=20.13 KB at sequenceid=142 (bloomFilter=true), to=hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/f14d455ff3b60546f0a651dc8cf12d5c/.tmp/C/e31958c7a9c946b295d968d09ae561c3 2024-12-16T17:58:36,245 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-0 {event_type=RS_FLUSH_REGIONS, pid=123}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/f14d455ff3b60546f0a651dc8cf12d5c/.tmp/A/03a7b5816df64c94b9359fabcf889428 as hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/f14d455ff3b60546f0a651dc8cf12d5c/A/03a7b5816df64c94b9359fabcf889428 2024-12-16T17:58:36,248 INFO [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-0 {event_type=RS_FLUSH_REGIONS, pid=123}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/f14d455ff3b60546f0a651dc8cf12d5c/A/03a7b5816df64c94b9359fabcf889428, entries=150, sequenceid=142, filesize=11.9 K 2024-12-16T17:58:36,248 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-0 {event_type=RS_FLUSH_REGIONS, pid=123}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/f14d455ff3b60546f0a651dc8cf12d5c/.tmp/B/e7c8c35209ed4199881958f1b9e0d8f7 as hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/f14d455ff3b60546f0a651dc8cf12d5c/B/e7c8c35209ed4199881958f1b9e0d8f7 2024-12-16T17:58:36,251 INFO [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-0 {event_type=RS_FLUSH_REGIONS, pid=123}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/f14d455ff3b60546f0a651dc8cf12d5c/B/e7c8c35209ed4199881958f1b9e0d8f7, entries=150, sequenceid=142, filesize=11.9 K 2024-12-16T17:58:36,251 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-0 
{event_type=RS_FLUSH_REGIONS, pid=123}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/f14d455ff3b60546f0a651dc8cf12d5c/.tmp/C/e31958c7a9c946b295d968d09ae561c3 as hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/f14d455ff3b60546f0a651dc8cf12d5c/C/e31958c7a9c946b295d968d09ae561c3 2024-12-16T17:58:36,254 INFO [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-0 {event_type=RS_FLUSH_REGIONS, pid=123}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/f14d455ff3b60546f0a651dc8cf12d5c/C/e31958c7a9c946b295d968d09ae561c3, entries=150, sequenceid=142, filesize=11.9 K 2024-12-16T17:58:36,254 INFO [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-0 {event_type=RS_FLUSH_REGIONS, pid=123}] regionserver.HRegion(3040): Finished flush of dataSize ~60.38 KB/61830, heapSize ~158.91 KB/162720, currentSize=147.60 KB/151140 for f14d455ff3b60546f0a651dc8cf12d5c in 1237ms, sequenceid=142, compaction requested=true 2024-12-16T17:58:36,254 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-0 {event_type=RS_FLUSH_REGIONS, pid=123}] regionserver.HRegion(2538): Flush status journal for f14d455ff3b60546f0a651dc8cf12d5c: 2024-12-16T17:58:36,254 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-0 {event_type=RS_FLUSH_REGIONS, pid=123}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1734371907068.f14d455ff3b60546f0a651dc8cf12d5c. 2024-12-16T17:58:36,254 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-0 {event_type=RS_FLUSH_REGIONS, pid=123}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=123 2024-12-16T17:58:36,255 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38367 {}] master.HMaster(4106): Remote procedure done, pid=123 2024-12-16T17:58:36,256 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=123, resume processing ppid=122 2024-12-16T17:58:36,256 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=123, ppid=122, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 1.6950 sec 2024-12-16T17:58:36,257 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=122, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=122, table=TestAcidGuarantees in 1.6980 sec 2024-12-16T17:58:36,334 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39733 {}] regionserver.HRegion(8581): Flush requested on f14d455ff3b60546f0a651dc8cf12d5c 2024-12-16T17:58:36,334 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing f14d455ff3b60546f0a651dc8cf12d5c 3/3 column families, dataSize=154.31 KB heapSize=405.05 KB 2024-12-16T17:58:36,335 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK f14d455ff3b60546f0a651dc8cf12d5c, store=A 2024-12-16T17:58:36,335 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-16T17:58:36,335 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK f14d455ff3b60546f0a651dc8cf12d5c, store=B 2024-12-16T17:58:36,335 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-16T17:58:36,335 DEBUG 
[MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK f14d455ff3b60546f0a651dc8cf12d5c, store=C 2024-12-16T17:58:36,335 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-16T17:58:36,338 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/f14d455ff3b60546f0a651dc8cf12d5c/.tmp/A/3175bad5261e4181baeb3f43c70f9c4d is 50, key is test_row_0/A:col10/1734371915216/Put/seqid=0 2024-12-16T17:58:36,341 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41817 is added to blk_1073742307_1483 (size=14541) 2024-12-16T17:58:36,342 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=53.67 KB at sequenceid=170 (bloomFilter=true), to=hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/f14d455ff3b60546f0a651dc8cf12d5c/.tmp/A/3175bad5261e4181baeb3f43c70f9c4d 2024-12-16T17:58:36,346 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39733 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f14d455ff3b60546f0a651dc8cf12d5c, server=3609ad07831c,39733,1734371789085 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-16T17:58:36,347 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39733 {}] ipc.CallRunner(138): callId: 74 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35482 deadline: 1734371976340, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f14d455ff3b60546f0a651dc8cf12d5c, server=3609ad07831c,39733,1734371789085 2024-12-16T17:58:36,347 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39733 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f14d455ff3b60546f0a651dc8cf12d5c, server=3609ad07831c,39733,1734371789085 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-16T17:58:36,347 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39733 {}] ipc.CallRunner(138): callId: 77 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35424 deadline: 1734371976341, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f14d455ff3b60546f0a651dc8cf12d5c, server=3609ad07831c,39733,1734371789085 2024-12-16T17:58:36,347 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39733 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f14d455ff3b60546f0a651dc8cf12d5c, server=3609ad07831c,39733,1734371789085 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-16T17:58:36,347 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/f14d455ff3b60546f0a651dc8cf12d5c/.tmp/B/b8481288c079486f82d2e82a057bdeb1 is 50, key is test_row_0/B:col10/1734371915216/Put/seqid=0 2024-12-16T17:58:36,347 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39733 {}] ipc.CallRunner(138): callId: 76 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35480 deadline: 1734371976342, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f14d455ff3b60546f0a651dc8cf12d5c, server=3609ad07831c,39733,1734371789085 2024-12-16T17:58:36,349 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39733 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f14d455ff3b60546f0a651dc8cf12d5c, server=3609ad07831c,39733,1734371789085 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-16T17:58:36,350 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39733 {}] ipc.CallRunner(138): callId: 72 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35486 deadline: 1734371976344, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f14d455ff3b60546f0a651dc8cf12d5c, server=3609ad07831c,39733,1734371789085 2024-12-16T17:58:36,350 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41817 is added to blk_1073742308_1484 (size=12151) 2024-12-16T17:58:36,352 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39733 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f14d455ff3b60546f0a651dc8cf12d5c, server=3609ad07831c,39733,1734371789085 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-16T17:58:36,352 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39733 {}] ipc.CallRunner(138): callId: 73 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35488 deadline: 1734371976346, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f14d455ff3b60546f0a651dc8cf12d5c, server=3609ad07831c,39733,1734371789085 2024-12-16T17:58:36,449 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39733 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f14d455ff3b60546f0a651dc8cf12d5c, server=3609ad07831c,39733,1734371789085 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-16T17:58:36,449 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39733 {}] ipc.CallRunner(138): callId: 76 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35482 deadline: 1734371976447, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f14d455ff3b60546f0a651dc8cf12d5c, server=3609ad07831c,39733,1734371789085 2024-12-16T17:58:36,452 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39733 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f14d455ff3b60546f0a651dc8cf12d5c, server=3609ad07831c,39733,1734371789085 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-16T17:58:36,452 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39733 {}] ipc.CallRunner(138): callId: 79 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35424 deadline: 1734371976448, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f14d455ff3b60546f0a651dc8cf12d5c, server=3609ad07831c,39733,1734371789085 2024-12-16T17:58:36,452 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39733 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f14d455ff3b60546f0a651dc8cf12d5c, server=3609ad07831c,39733,1734371789085 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-16T17:58:36,452 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39733 {}] ipc.CallRunner(138): callId: 78 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35480 deadline: 1734371976448, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f14d455ff3b60546f0a651dc8cf12d5c, server=3609ad07831c,39733,1734371789085 2024-12-16T17:58:36,456 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39733 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f14d455ff3b60546f0a651dc8cf12d5c, server=3609ad07831c,39733,1734371789085 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-16T17:58:36,456 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39733 {}] ipc.CallRunner(138): callId: 75 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35488 deadline: 1734371976453, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f14d455ff3b60546f0a651dc8cf12d5c, server=3609ad07831c,39733,1734371789085 2024-12-16T17:58:36,652 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39733 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f14d455ff3b60546f0a651dc8cf12d5c, server=3609ad07831c,39733,1734371789085 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-16T17:58:36,652 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39733 {}] ipc.CallRunner(138): callId: 78 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35482 deadline: 1734371976650, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f14d455ff3b60546f0a651dc8cf12d5c, server=3609ad07831c,39733,1734371789085 2024-12-16T17:58:36,655 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39733 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f14d455ff3b60546f0a651dc8cf12d5c, server=3609ad07831c,39733,1734371789085 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-16T17:58:36,655 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39733 {}] ipc.CallRunner(138): callId: 81 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35424 deadline: 1734371976653, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f14d455ff3b60546f0a651dc8cf12d5c, server=3609ad07831c,39733,1734371789085 2024-12-16T17:58:36,657 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39733 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f14d455ff3b60546f0a651dc8cf12d5c, server=3609ad07831c,39733,1734371789085 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-16T17:58:36,657 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39733 {}] ipc.CallRunner(138): callId: 80 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35480 deadline: 1734371976654, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f14d455ff3b60546f0a651dc8cf12d5c, server=3609ad07831c,39733,1734371789085 2024-12-16T17:58:36,659 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39733 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f14d455ff3b60546f0a651dc8cf12d5c, server=3609ad07831c,39733,1734371789085 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-16T17:58:36,659 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39733 {}] ipc.CallRunner(138): callId: 77 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35488 deadline: 1734371976657, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f14d455ff3b60546f0a651dc8cf12d5c, server=3609ad07831c,39733,1734371789085 2024-12-16T17:58:36,662 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38367 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=122 2024-12-16T17:58:36,663 INFO [Thread-2023 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 122 completed 2024-12-16T17:58:36,663 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38367 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-12-16T17:58:36,664 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38367 {}] procedure2.ProcedureExecutor(1098): Stored pid=124, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=124, table=TestAcidGuarantees 2024-12-16T17:58:36,664 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38367 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=124 2024-12-16T17:58:36,664 INFO [PEWorker-5 {}] procedure.FlushTableProcedure(91): pid=124, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=124, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-12-16T17:58:36,665 INFO [PEWorker-5 {}] procedure.FlushTableProcedure(91): pid=124, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=124, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-12-16T17:58:36,665 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=125, ppid=124, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-12-16T17:58:36,750 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=53.67 KB at 
sequenceid=170 (bloomFilter=true), to=hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/f14d455ff3b60546f0a651dc8cf12d5c/.tmp/B/b8481288c079486f82d2e82a057bdeb1 2024-12-16T17:58:36,757 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/f14d455ff3b60546f0a651dc8cf12d5c/.tmp/C/b62ecf0392b247199939e0d351e30dae is 50, key is test_row_0/C:col10/1734371915216/Put/seqid=0 2024-12-16T17:58:36,762 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41817 is added to blk_1073742309_1485 (size=12151) 2024-12-16T17:58:36,765 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38367 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=124 2024-12-16T17:58:36,766 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=53.67 KB at sequenceid=170 (bloomFilter=true), to=hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/f14d455ff3b60546f0a651dc8cf12d5c/.tmp/C/b62ecf0392b247199939e0d351e30dae 2024-12-16T17:58:36,769 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/f14d455ff3b60546f0a651dc8cf12d5c/.tmp/A/3175bad5261e4181baeb3f43c70f9c4d as hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/f14d455ff3b60546f0a651dc8cf12d5c/A/3175bad5261e4181baeb3f43c70f9c4d 2024-12-16T17:58:36,773 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/f14d455ff3b60546f0a651dc8cf12d5c/A/3175bad5261e4181baeb3f43c70f9c4d, entries=200, sequenceid=170, filesize=14.2 K 2024-12-16T17:58:36,774 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/f14d455ff3b60546f0a651dc8cf12d5c/.tmp/B/b8481288c079486f82d2e82a057bdeb1 as hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/f14d455ff3b60546f0a651dc8cf12d5c/B/b8481288c079486f82d2e82a057bdeb1 2024-12-16T17:58:36,776 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/f14d455ff3b60546f0a651dc8cf12d5c/B/b8481288c079486f82d2e82a057bdeb1, entries=150, sequenceid=170, filesize=11.9 K 2024-12-16T17:58:36,777 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/f14d455ff3b60546f0a651dc8cf12d5c/.tmp/C/b62ecf0392b247199939e0d351e30dae as hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/f14d455ff3b60546f0a651dc8cf12d5c/C/b62ecf0392b247199939e0d351e30dae 2024-12-16T17:58:36,779 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added 
hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/f14d455ff3b60546f0a651dc8cf12d5c/C/b62ecf0392b247199939e0d351e30dae, entries=150, sequenceid=170, filesize=11.9 K 2024-12-16T17:58:36,780 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~161.02 KB/164880, heapSize ~422.58 KB/432720, currentSize=40.25 KB/41220 for f14d455ff3b60546f0a651dc8cf12d5c in 446ms, sequenceid=170, compaction requested=true 2024-12-16T17:58:36,780 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for f14d455ff3b60546f0a651dc8cf12d5c: 2024-12-16T17:58:36,780 DEBUG [RS:0;3609ad07831c:39733-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 4 store files, 0 compacting, 4 eligible, 16 blocking 2024-12-16T17:58:36,780 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store f14d455ff3b60546f0a651dc8cf12d5c:A, priority=-2147483648, current under compaction store size is 1 2024-12-16T17:58:36,780 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-16T17:58:36,780 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store f14d455ff3b60546f0a651dc8cf12d5c:B, priority=-2147483648, current under compaction store size is 2 2024-12-16T17:58:36,780 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-16T17:58:36,780 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store f14d455ff3b60546f0a651dc8cf12d5c:C, priority=-2147483648, current under compaction store size is 3 2024-12-16T17:58:36,780 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=2), splitQueue=0 2024-12-16T17:58:36,781 DEBUG [RS:0;3609ad07831c:39733-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 4 files of size 53274 starting at candidate #0 after considering 3 permutations with 3 in ratio 2024-12-16T17:58:36,781 DEBUG [RS:0;3609ad07831c:39733-shortCompactions-0 {}] regionserver.HStore(1540): f14d455ff3b60546f0a651dc8cf12d5c/A is initiating minor compaction (all files) 2024-12-16T17:58:36,781 INFO [RS:0;3609ad07831c:39733-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of f14d455ff3b60546f0a651dc8cf12d5c/A in TestAcidGuarantees,,1734371907068.f14d455ff3b60546f0a651dc8cf12d5c. 
2024-12-16T17:58:36,781 DEBUG [RS:0;3609ad07831c:39733-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 4 store files, 0 compacting, 4 eligible, 16 blocking 2024-12-16T17:58:36,781 INFO [RS:0;3609ad07831c:39733-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/f14d455ff3b60546f0a651dc8cf12d5c/A/ef7c5e301f5f4f6c8d83bbd5c9e416b7, hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/f14d455ff3b60546f0a651dc8cf12d5c/A/6935df28d00e490aadba59790202db4a, hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/f14d455ff3b60546f0a651dc8cf12d5c/A/03a7b5816df64c94b9359fabcf889428, hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/f14d455ff3b60546f0a651dc8cf12d5c/A/3175bad5261e4181baeb3f43c70f9c4d] into tmpdir=hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/f14d455ff3b60546f0a651dc8cf12d5c/.tmp, totalSize=52.0 K 2024-12-16T17:58:36,781 DEBUG [RS:0;3609ad07831c:39733-shortCompactions-0 {}] compactions.Compactor(224): Compacting ef7c5e301f5f4f6c8d83bbd5c9e416b7, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=104, earliestPutTs=1734371914411 2024-12-16T17:58:36,782 DEBUG [RS:0;3609ad07831c:39733-shortCompactions-0 {}] compactions.Compactor(224): Compacting 6935df28d00e490aadba59790202db4a, keycount=200, bloomtype=ROW, size=14.0 K, encoding=NONE, compression=NONE, seqNum=128, earliestPutTs=1734371914442 2024-12-16T17:58:36,782 DEBUG [RS:0;3609ad07831c:39733-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 4 files of size 48544 starting at candidate #0 after considering 3 permutations with 3 in ratio 2024-12-16T17:58:36,782 DEBUG [RS:0;3609ad07831c:39733-shortCompactions-0 {}] compactions.Compactor(224): Compacting 03a7b5816df64c94b9359fabcf889428, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=142, earliestPutTs=1734371914554 2024-12-16T17:58:36,782 DEBUG [RS:0;3609ad07831c:39733-longCompactions-0 {}] regionserver.HStore(1540): f14d455ff3b60546f0a651dc8cf12d5c/B is initiating minor compaction (all files) 2024-12-16T17:58:36,782 INFO [RS:0;3609ad07831c:39733-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of f14d455ff3b60546f0a651dc8cf12d5c/B in TestAcidGuarantees,,1734371907068.f14d455ff3b60546f0a651dc8cf12d5c. 
2024-12-16T17:58:36,782 INFO [RS:0;3609ad07831c:39733-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/f14d455ff3b60546f0a651dc8cf12d5c/B/389c1f1159c54c96827698c05c99ef58, hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/f14d455ff3b60546f0a651dc8cf12d5c/B/cc5b7c5119184ecea19d1f5cb70d4487, hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/f14d455ff3b60546f0a651dc8cf12d5c/B/e7c8c35209ed4199881958f1b9e0d8f7, hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/f14d455ff3b60546f0a651dc8cf12d5c/B/b8481288c079486f82d2e82a057bdeb1] into tmpdir=hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/f14d455ff3b60546f0a651dc8cf12d5c/.tmp, totalSize=47.4 K 2024-12-16T17:58:36,782 DEBUG [RS:0;3609ad07831c:39733-shortCompactions-0 {}] compactions.Compactor(224): Compacting 3175bad5261e4181baeb3f43c70f9c4d, keycount=200, bloomtype=ROW, size=14.2 K, encoding=NONE, compression=NONE, seqNum=170, earliestPutTs=1734371915207 2024-12-16T17:58:36,782 DEBUG [RS:0;3609ad07831c:39733-longCompactions-0 {}] compactions.Compactor(224): Compacting 389c1f1159c54c96827698c05c99ef58, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=104, earliestPutTs=1734371914411 2024-12-16T17:58:36,782 DEBUG [RS:0;3609ad07831c:39733-longCompactions-0 {}] compactions.Compactor(224): Compacting cc5b7c5119184ecea19d1f5cb70d4487, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=128, earliestPutTs=1734371914442 2024-12-16T17:58:36,783 DEBUG [RS:0;3609ad07831c:39733-longCompactions-0 {}] compactions.Compactor(224): Compacting e7c8c35209ed4199881958f1b9e0d8f7, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=142, earliestPutTs=1734371914554 2024-12-16T17:58:36,783 DEBUG [RS:0;3609ad07831c:39733-longCompactions-0 {}] compactions.Compactor(224): Compacting b8481288c079486f82d2e82a057bdeb1, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=170, earliestPutTs=1734371915207 2024-12-16T17:58:36,789 INFO [RS:0;3609ad07831c:39733-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): f14d455ff3b60546f0a651dc8cf12d5c#A#compaction#411 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-12-16T17:58:36,789 DEBUG [RS:0;3609ad07831c:39733-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/f14d455ff3b60546f0a651dc8cf12d5c/.tmp/A/028c7d69a57a4729aed44044fbae59e7 is 50, key is test_row_0/A:col10/1734371915216/Put/seqid=0 2024-12-16T17:58:36,816 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 3609ad07831c,39733,1734371789085 2024-12-16T17:58:36,816 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=39733 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=125 2024-12-16T17:58:36,817 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-1 {event_type=RS_FLUSH_REGIONS, pid=125}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1734371907068.f14d455ff3b60546f0a651dc8cf12d5c. 2024-12-16T17:58:36,817 INFO [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-1 {event_type=RS_FLUSH_REGIONS, pid=125}] regionserver.HRegion(2837): Flushing f14d455ff3b60546f0a651dc8cf12d5c 3/3 column families, dataSize=40.25 KB heapSize=106.22 KB 2024-12-16T17:58:36,817 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-1 {event_type=RS_FLUSH_REGIONS, pid=125}] regionserver.CompactingMemStore(205): FLUSHING TO DISK f14d455ff3b60546f0a651dc8cf12d5c, store=A 2024-12-16T17:58:36,817 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-1 {event_type=RS_FLUSH_REGIONS, pid=125}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-16T17:58:36,817 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-1 {event_type=RS_FLUSH_REGIONS, pid=125}] regionserver.CompactingMemStore(205): FLUSHING TO DISK f14d455ff3b60546f0a651dc8cf12d5c, store=B 2024-12-16T17:58:36,817 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-1 {event_type=RS_FLUSH_REGIONS, pid=125}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-16T17:58:36,817 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-1 {event_type=RS_FLUSH_REGIONS, pid=125}] regionserver.CompactingMemStore(205): FLUSHING TO DISK f14d455ff3b60546f0a651dc8cf12d5c, store=C 2024-12-16T17:58:36,817 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-1 {event_type=RS_FLUSH_REGIONS, pid=125}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-16T17:58:36,818 INFO [RS:0;3609ad07831c:39733-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): f14d455ff3b60546f0a651dc8cf12d5c#B#compaction#412 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-12-16T17:58:36,818 DEBUG [RS:0;3609ad07831c:39733-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/f14d455ff3b60546f0a651dc8cf12d5c/.tmp/B/48335edbd9514f6eb1f131d99c2dab23 is 50, key is test_row_0/B:col10/1734371915216/Put/seqid=0 2024-12-16T17:58:36,820 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41817 is added to blk_1073742310_1486 (size=12527) 2024-12-16T17:58:36,821 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-1 {event_type=RS_FLUSH_REGIONS, pid=125}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/f14d455ff3b60546f0a651dc8cf12d5c/.tmp/A/ca837e5e4a4b4588ab991384e8abd39e is 50, key is test_row_0/A:col10/1734371916341/Put/seqid=0 2024-12-16T17:58:36,826 DEBUG [RS:0;3609ad07831c:39733-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/f14d455ff3b60546f0a651dc8cf12d5c/.tmp/A/028c7d69a57a4729aed44044fbae59e7 as hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/f14d455ff3b60546f0a651dc8cf12d5c/A/028c7d69a57a4729aed44044fbae59e7 2024-12-16T17:58:36,830 INFO [RS:0;3609ad07831c:39733-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 4 (all) file(s) in f14d455ff3b60546f0a651dc8cf12d5c/A of f14d455ff3b60546f0a651dc8cf12d5c into 028c7d69a57a4729aed44044fbae59e7(size=12.2 K), total size for store is 12.2 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-12-16T17:58:36,830 DEBUG [RS:0;3609ad07831c:39733-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for f14d455ff3b60546f0a651dc8cf12d5c: 2024-12-16T17:58:36,830 INFO [RS:0;3609ad07831c:39733-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1734371907068.f14d455ff3b60546f0a651dc8cf12d5c., storeName=f14d455ff3b60546f0a651dc8cf12d5c/A, priority=12, startTime=1734371916780; duration=0sec 2024-12-16T17:58:36,830 DEBUG [RS:0;3609ad07831c:39733-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-16T17:58:36,830 DEBUG [RS:0;3609ad07831c:39733-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: f14d455ff3b60546f0a651dc8cf12d5c:A 2024-12-16T17:58:36,830 DEBUG [RS:0;3609ad07831c:39733-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 4 store files, 0 compacting, 4 eligible, 16 blocking 2024-12-16T17:58:36,831 DEBUG [RS:0;3609ad07831c:39733-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 4 files of size 48544 starting at candidate #0 after considering 3 permutations with 3 in ratio 2024-12-16T17:58:36,831 DEBUG [RS:0;3609ad07831c:39733-shortCompactions-0 {}] regionserver.HStore(1540): f14d455ff3b60546f0a651dc8cf12d5c/C is initiating minor compaction (all files) 2024-12-16T17:58:36,831 INFO [RS:0;3609ad07831c:39733-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of f14d455ff3b60546f0a651dc8cf12d5c/C in TestAcidGuarantees,,1734371907068.f14d455ff3b60546f0a651dc8cf12d5c. 2024-12-16T17:58:36,831 INFO [RS:0;3609ad07831c:39733-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/f14d455ff3b60546f0a651dc8cf12d5c/C/6c076be5ba374c5f8e7f7df3e1cb4578, hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/f14d455ff3b60546f0a651dc8cf12d5c/C/7c896520f2194ecd9942c205fca8009b, hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/f14d455ff3b60546f0a651dc8cf12d5c/C/e31958c7a9c946b295d968d09ae561c3, hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/f14d455ff3b60546f0a651dc8cf12d5c/C/b62ecf0392b247199939e0d351e30dae] into tmpdir=hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/f14d455ff3b60546f0a651dc8cf12d5c/.tmp, totalSize=47.4 K 2024-12-16T17:58:36,831 DEBUG [RS:0;3609ad07831c:39733-shortCompactions-0 {}] compactions.Compactor(224): Compacting 6c076be5ba374c5f8e7f7df3e1cb4578, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=104, earliestPutTs=1734371914411 2024-12-16T17:58:36,832 DEBUG [RS:0;3609ad07831c:39733-shortCompactions-0 {}] compactions.Compactor(224): Compacting 7c896520f2194ecd9942c205fca8009b, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=128, earliestPutTs=1734371914442 2024-12-16T17:58:36,832 DEBUG [RS:0;3609ad07831c:39733-shortCompactions-0 {}] compactions.Compactor(224): Compacting e31958c7a9c946b295d968d09ae561c3, keycount=150, bloomtype=ROW, size=11.9 K, 
encoding=NONE, compression=NONE, seqNum=142, earliestPutTs=1734371914554 2024-12-16T17:58:36,832 DEBUG [RS:0;3609ad07831c:39733-shortCompactions-0 {}] compactions.Compactor(224): Compacting b62ecf0392b247199939e0d351e30dae, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=170, earliestPutTs=1734371915207 2024-12-16T17:58:36,834 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41817 is added to blk_1073742311_1487 (size=12527) 2024-12-16T17:58:36,837 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41817 is added to blk_1073742312_1488 (size=12151) 2024-12-16T17:58:36,838 INFO [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-1 {event_type=RS_FLUSH_REGIONS, pid=125}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=13.42 KB at sequenceid=179 (bloomFilter=true), to=hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/f14d455ff3b60546f0a651dc8cf12d5c/.tmp/A/ca837e5e4a4b4588ab991384e8abd39e 2024-12-16T17:58:36,838 DEBUG [RS:0;3609ad07831c:39733-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/f14d455ff3b60546f0a651dc8cf12d5c/.tmp/B/48335edbd9514f6eb1f131d99c2dab23 as hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/f14d455ff3b60546f0a651dc8cf12d5c/B/48335edbd9514f6eb1f131d99c2dab23 2024-12-16T17:58:36,841 INFO [RS:0;3609ad07831c:39733-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): f14d455ff3b60546f0a651dc8cf12d5c#C#compaction#414 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-12-16T17:58:36,841 DEBUG [RS:0;3609ad07831c:39733-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/f14d455ff3b60546f0a651dc8cf12d5c/.tmp/C/a0753d32d8484fdf9da23ceff39a024c is 50, key is test_row_0/C:col10/1734371915216/Put/seqid=0 2024-12-16T17:58:36,843 INFO [RS:0;3609ad07831c:39733-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 4 (all) file(s) in f14d455ff3b60546f0a651dc8cf12d5c/B of f14d455ff3b60546f0a651dc8cf12d5c into 48335edbd9514f6eb1f131d99c2dab23(size=12.2 K), total size for store is 12.2 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-12-16T17:58:36,843 DEBUG [RS:0;3609ad07831c:39733-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for f14d455ff3b60546f0a651dc8cf12d5c: 2024-12-16T17:58:36,843 INFO [RS:0;3609ad07831c:39733-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1734371907068.f14d455ff3b60546f0a651dc8cf12d5c., storeName=f14d455ff3b60546f0a651dc8cf12d5c/B, priority=12, startTime=1734371916780; duration=0sec 2024-12-16T17:58:36,843 DEBUG [RS:0;3609ad07831c:39733-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-16T17:58:36,843 DEBUG [RS:0;3609ad07831c:39733-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: f14d455ff3b60546f0a651dc8cf12d5c:B 2024-12-16T17:58:36,844 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-1 {event_type=RS_FLUSH_REGIONS, pid=125}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/f14d455ff3b60546f0a651dc8cf12d5c/.tmp/B/effeceb4c20f4119ba8e1b0e5760b134 is 50, key is test_row_0/B:col10/1734371916341/Put/seqid=0 2024-12-16T17:58:36,849 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41817 is added to blk_1073742313_1489 (size=12527) 2024-12-16T17:58:36,857 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41817 is added to blk_1073742314_1490 (size=12151) 2024-12-16T17:58:36,962 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39733 {}] regionserver.HRegion(8581): Flush requested on f14d455ff3b60546f0a651dc8cf12d5c 2024-12-16T17:58:36,962 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1734371907068.f14d455ff3b60546f0a651dc8cf12d5c. as already flushing 2024-12-16T17:58:36,966 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38367 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=124 2024-12-16T17:58:36,998 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39733 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f14d455ff3b60546f0a651dc8cf12d5c, server=3609ad07831c,39733,1734371789085 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-16T17:58:36,998 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39733 {}] ipc.CallRunner(138): callId: 89 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35424 deadline: 1734371976992, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f14d455ff3b60546f0a651dc8cf12d5c, server=3609ad07831c,39733,1734371789085 2024-12-16T17:58:36,998 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39733 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f14d455ff3b60546f0a651dc8cf12d5c, server=3609ad07831c,39733,1734371789085 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-16T17:58:36,998 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39733 {}] ipc.CallRunner(138): callId: 84 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35488 deadline: 1734371976993, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f14d455ff3b60546f0a651dc8cf12d5c, server=3609ad07831c,39733,1734371789085 2024-12-16T17:58:36,998 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39733 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f14d455ff3b60546f0a651dc8cf12d5c, server=3609ad07831c,39733,1734371789085 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-16T17:58:36,998 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39733 {}] ipc.CallRunner(138): callId: 88 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35480 deadline: 1734371976993, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f14d455ff3b60546f0a651dc8cf12d5c, server=3609ad07831c,39733,1734371789085 2024-12-16T17:58:37,004 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39733 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f14d455ff3b60546f0a651dc8cf12d5c, server=3609ad07831c,39733,1734371789085 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-16T17:58:37,004 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39733 {}] ipc.CallRunner(138): callId: 87 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35482 deadline: 1734371976998, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f14d455ff3b60546f0a651dc8cf12d5c, server=3609ad07831c,39733,1734371789085 2024-12-16T17:58:37,102 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39733 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f14d455ff3b60546f0a651dc8cf12d5c, server=3609ad07831c,39733,1734371789085 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-16T17:58:37,102 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39733 {}] ipc.CallRunner(138): callId: 91 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35424 deadline: 1734371977099, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f14d455ff3b60546f0a651dc8cf12d5c, server=3609ad07831c,39733,1734371789085 2024-12-16T17:58:37,103 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39733 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f14d455ff3b60546f0a651dc8cf12d5c, server=3609ad07831c,39733,1734371789085 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-16T17:58:37,103 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39733 {}] ipc.CallRunner(138): callId: 86 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35488 deadline: 1734371977099, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f14d455ff3b60546f0a651dc8cf12d5c, server=3609ad07831c,39733,1734371789085 2024-12-16T17:58:37,103 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39733 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f14d455ff3b60546f0a651dc8cf12d5c, server=3609ad07831c,39733,1734371789085 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-16T17:58:37,103 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39733 {}] ipc.CallRunner(138): callId: 90 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35480 deadline: 1734371977099, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f14d455ff3b60546f0a651dc8cf12d5c, server=3609ad07831c,39733,1734371789085 2024-12-16T17:58:37,108 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39733 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f14d455ff3b60546f0a651dc8cf12d5c, server=3609ad07831c,39733,1734371789085 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-16T17:58:37,108 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39733 {}] ipc.CallRunner(138): callId: 89 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35482 deadline: 1734371977105, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f14d455ff3b60546f0a651dc8cf12d5c, server=3609ad07831c,39733,1734371789085 2024-12-16T17:58:37,253 DEBUG [RS:0;3609ad07831c:39733-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/f14d455ff3b60546f0a651dc8cf12d5c/.tmp/C/a0753d32d8484fdf9da23ceff39a024c as hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/f14d455ff3b60546f0a651dc8cf12d5c/C/a0753d32d8484fdf9da23ceff39a024c 2024-12-16T17:58:37,256 INFO [RS:0;3609ad07831c:39733-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 4 (all) file(s) in f14d455ff3b60546f0a651dc8cf12d5c/C of f14d455ff3b60546f0a651dc8cf12d5c into a0753d32d8484fdf9da23ceff39a024c(size=12.2 K), total size for store is 12.2 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-12-16T17:58:37,257 DEBUG [RS:0;3609ad07831c:39733-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for f14d455ff3b60546f0a651dc8cf12d5c: 2024-12-16T17:58:37,257 INFO [RS:0;3609ad07831c:39733-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1734371907068.f14d455ff3b60546f0a651dc8cf12d5c., storeName=f14d455ff3b60546f0a651dc8cf12d5c/C, priority=12, startTime=1734371916780; duration=0sec 2024-12-16T17:58:37,257 DEBUG [RS:0;3609ad07831c:39733-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-16T17:58:37,257 DEBUG [RS:0;3609ad07831c:39733-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: f14d455ff3b60546f0a651dc8cf12d5c:C 2024-12-16T17:58:37,257 INFO [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-1 {event_type=RS_FLUSH_REGIONS, pid=125}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=13.42 KB at sequenceid=179 (bloomFilter=true), to=hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/f14d455ff3b60546f0a651dc8cf12d5c/.tmp/B/effeceb4c20f4119ba8e1b0e5760b134 2024-12-16T17:58:37,263 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-1 {event_type=RS_FLUSH_REGIONS, pid=125}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/f14d455ff3b60546f0a651dc8cf12d5c/.tmp/C/80841d2885d44c1bab067a3bc285eb0a is 50, key is test_row_0/C:col10/1734371916341/Put/seqid=0 2024-12-16T17:58:37,265 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41817 is added to blk_1073742315_1491 (size=12151) 2024-12-16T17:58:37,266 INFO [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-1 {event_type=RS_FLUSH_REGIONS, pid=125}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=13.42 KB at sequenceid=179 (bloomFilter=true), to=hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/f14d455ff3b60546f0a651dc8cf12d5c/.tmp/C/80841d2885d44c1bab067a3bc285eb0a 2024-12-16T17:58:37,266 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38367 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=124 2024-12-16T17:58:37,269 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-1 {event_type=RS_FLUSH_REGIONS, pid=125}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/f14d455ff3b60546f0a651dc8cf12d5c/.tmp/A/ca837e5e4a4b4588ab991384e8abd39e as hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/f14d455ff3b60546f0a651dc8cf12d5c/A/ca837e5e4a4b4588ab991384e8abd39e 2024-12-16T17:58:37,272 INFO [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-1 {event_type=RS_FLUSH_REGIONS, pid=125}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/f14d455ff3b60546f0a651dc8cf12d5c/A/ca837e5e4a4b4588ab991384e8abd39e, entries=150, sequenceid=179, filesize=11.9 K 2024-12-16T17:58:37,273 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-1 
{event_type=RS_FLUSH_REGIONS, pid=125}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/f14d455ff3b60546f0a651dc8cf12d5c/.tmp/B/effeceb4c20f4119ba8e1b0e5760b134 as hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/f14d455ff3b60546f0a651dc8cf12d5c/B/effeceb4c20f4119ba8e1b0e5760b134 2024-12-16T17:58:37,275 INFO [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-1 {event_type=RS_FLUSH_REGIONS, pid=125}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/f14d455ff3b60546f0a651dc8cf12d5c/B/effeceb4c20f4119ba8e1b0e5760b134, entries=150, sequenceid=179, filesize=11.9 K 2024-12-16T17:58:37,276 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-1 {event_type=RS_FLUSH_REGIONS, pid=125}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/f14d455ff3b60546f0a651dc8cf12d5c/.tmp/C/80841d2885d44c1bab067a3bc285eb0a as hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/f14d455ff3b60546f0a651dc8cf12d5c/C/80841d2885d44c1bab067a3bc285eb0a 2024-12-16T17:58:37,279 INFO [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-1 {event_type=RS_FLUSH_REGIONS, pid=125}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/f14d455ff3b60546f0a651dc8cf12d5c/C/80841d2885d44c1bab067a3bc285eb0a, entries=150, sequenceid=179, filesize=11.9 K 2024-12-16T17:58:37,279 INFO [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-1 {event_type=RS_FLUSH_REGIONS, pid=125}] regionserver.HRegion(3040): Finished flush of dataSize ~40.25 KB/41220, heapSize ~106.17 KB/108720, currentSize=161.02 KB/164880 for f14d455ff3b60546f0a651dc8cf12d5c in 462ms, sequenceid=179, compaction requested=false 2024-12-16T17:58:37,279 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-1 {event_type=RS_FLUSH_REGIONS, pid=125}] regionserver.HRegion(2538): Flush status journal for f14d455ff3b60546f0a651dc8cf12d5c: 2024-12-16T17:58:37,280 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-1 {event_type=RS_FLUSH_REGIONS, pid=125}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1734371907068.f14d455ff3b60546f0a651dc8cf12d5c. 
2024-12-16T17:58:37,280 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-1 {event_type=RS_FLUSH_REGIONS, pid=125}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=125 2024-12-16T17:58:37,280 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38367 {}] master.HMaster(4106): Remote procedure done, pid=125 2024-12-16T17:58:37,282 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=125, resume processing ppid=124 2024-12-16T17:58:37,282 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=125, ppid=124, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 616 msec 2024-12-16T17:58:37,283 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=124, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=124, table=TestAcidGuarantees in 619 msec 2024-12-16T17:58:37,307 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39733 {}] regionserver.HRegion(8581): Flush requested on f14d455ff3b60546f0a651dc8cf12d5c 2024-12-16T17:58:37,307 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing f14d455ff3b60546f0a651dc8cf12d5c 3/3 column families, dataSize=167.72 KB heapSize=440.20 KB 2024-12-16T17:58:37,308 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK f14d455ff3b60546f0a651dc8cf12d5c, store=A 2024-12-16T17:58:37,309 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-16T17:58:37,309 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK f14d455ff3b60546f0a651dc8cf12d5c, store=B 2024-12-16T17:58:37,309 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-16T17:58:37,309 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK f14d455ff3b60546f0a651dc8cf12d5c, store=C 2024-12-16T17:58:37,309 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-16T17:58:37,312 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/f14d455ff3b60546f0a651dc8cf12d5c/.tmp/A/a28736933768401da0f3e79b87f68584 is 50, key is test_row_0/A:col10/1734371916993/Put/seqid=0 2024-12-16T17:58:37,314 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39733 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f14d455ff3b60546f0a651dc8cf12d5c, server=3609ad07831c,39733,1734371789085 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-16T17:58:37,315 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39733 {}] ipc.CallRunner(138): callId: 89 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35488 deadline: 1734371977310, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f14d455ff3b60546f0a651dc8cf12d5c, server=3609ad07831c,39733,1734371789085 2024-12-16T17:58:37,315 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41817 is added to blk_1073742316_1492 (size=16931) 2024-12-16T17:58:37,316 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39733 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f14d455ff3b60546f0a651dc8cf12d5c, server=3609ad07831c,39733,1734371789085 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-16T17:58:37,316 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39733 {}] ipc.CallRunner(138): callId: 94 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35480 deadline: 1734371977313, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f14d455ff3b60546f0a651dc8cf12d5c, server=3609ad07831c,39733,1734371789085 2024-12-16T17:58:37,317 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39733 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f14d455ff3b60546f0a651dc8cf12d5c, server=3609ad07831c,39733,1734371789085 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-16T17:58:37,317 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39733 {}] ipc.CallRunner(138): callId: 95 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35424 deadline: 1734371977314, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f14d455ff3b60546f0a651dc8cf12d5c, server=3609ad07831c,39733,1734371789085 2024-12-16T17:58:37,318 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39733 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f14d455ff3b60546f0a651dc8cf12d5c, server=3609ad07831c,39733,1734371789085 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-16T17:58:37,318 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39733 {}] ipc.CallRunner(138): callId: 92 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35482 deadline: 1734371977315, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f14d455ff3b60546f0a651dc8cf12d5c, server=3609ad07831c,39733,1734371789085 2024-12-16T17:58:37,362 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39733 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f14d455ff3b60546f0a651dc8cf12d5c, server=3609ad07831c,39733,1734371789085 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-16T17:58:37,362 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39733 {}] ipc.CallRunner(138): callId: 74 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35486 deadline: 1734371977360, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f14d455ff3b60546f0a651dc8cf12d5c, server=3609ad07831c,39733,1734371789085 2024-12-16T17:58:37,416 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39733 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f14d455ff3b60546f0a651dc8cf12d5c, server=3609ad07831c,39733,1734371789085 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-16T17:58:37,417 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39733 {}] ipc.CallRunner(138): callId: 91 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35488 deadline: 1734371977415, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f14d455ff3b60546f0a651dc8cf12d5c, server=3609ad07831c,39733,1734371789085 2024-12-16T17:58:37,422 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39733 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f14d455ff3b60546f0a651dc8cf12d5c, server=3609ad07831c,39733,1734371789085 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-16T17:58:37,423 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39733 {}] ipc.CallRunner(138): callId: 96 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35480 deadline: 1734371977417, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f14d455ff3b60546f0a651dc8cf12d5c, server=3609ad07831c,39733,1734371789085 2024-12-16T17:58:37,423 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39733 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f14d455ff3b60546f0a651dc8cf12d5c, server=3609ad07831c,39733,1734371789085 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-16T17:58:37,423 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39733 {}] ipc.CallRunner(138): callId: 97 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35424 deadline: 1734371977418, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f14d455ff3b60546f0a651dc8cf12d5c, server=3609ad07831c,39733,1734371789085 2024-12-16T17:58:37,423 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39733 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f14d455ff3b60546f0a651dc8cf12d5c, server=3609ad07831c,39733,1734371789085 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-16T17:58:37,423 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39733 {}] ipc.CallRunner(138): callId: 94 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35482 deadline: 1734371977419, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f14d455ff3b60546f0a651dc8cf12d5c, server=3609ad07831c,39733,1734371789085 2024-12-16T17:58:37,621 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39733 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f14d455ff3b60546f0a651dc8cf12d5c, server=3609ad07831c,39733,1734371789085 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-16T17:58:37,621 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39733 {}] ipc.CallRunner(138): callId: 93 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35488 deadline: 1734371977619, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f14d455ff3b60546f0a651dc8cf12d5c, server=3609ad07831c,39733,1734371789085 2024-12-16T17:58:37,626 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39733 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f14d455ff3b60546f0a651dc8cf12d5c, server=3609ad07831c,39733,1734371789085 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-16T17:58:37,626 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39733 {}] ipc.CallRunner(138): callId: 98 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35480 deadline: 1734371977623, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f14d455ff3b60546f0a651dc8cf12d5c, server=3609ad07831c,39733,1734371789085 2024-12-16T17:58:37,627 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39733 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f14d455ff3b60546f0a651dc8cf12d5c, server=3609ad07831c,39733,1734371789085 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-16T17:58:37,627 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39733 {}] ipc.CallRunner(138): callId: 99 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35424 deadline: 1734371977624, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f14d455ff3b60546f0a651dc8cf12d5c, server=3609ad07831c,39733,1734371789085 2024-12-16T17:58:37,627 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39733 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f14d455ff3b60546f0a651dc8cf12d5c, server=3609ad07831c,39733,1734371789085 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-16T17:58:37,627 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39733 {}] ipc.CallRunner(138): callId: 96 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35482 deadline: 1734371977625, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f14d455ff3b60546f0a651dc8cf12d5c, server=3609ad07831c,39733,1734371789085 2024-12-16T17:58:37,716 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=58.14 KB at sequenceid=211 (bloomFilter=true), to=hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/f14d455ff3b60546f0a651dc8cf12d5c/.tmp/A/a28736933768401da0f3e79b87f68584 2024-12-16T17:58:37,722 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/f14d455ff3b60546f0a651dc8cf12d5c/.tmp/B/0b84b959c2df4d8d84caddd51a132cfc is 50, key is test_row_0/B:col10/1734371916993/Put/seqid=0 2024-12-16T17:58:37,725 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41817 is added to blk_1073742317_1493 (size=12151) 2024-12-16T17:58:37,767 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38367 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=124 2024-12-16T17:58:37,767 INFO [Thread-2023 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 124 completed 2024-12-16T17:58:37,768 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38367 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-12-16T17:58:37,769 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38367 {}] procedure2.ProcedureExecutor(1098): Stored pid=126, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=126, table=TestAcidGuarantees 2024-12-16T17:58:37,769 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38367 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=126 2024-12-16T17:58:37,769 INFO [PEWorker-2 {}] procedure.FlushTableProcedure(91): pid=126, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=126, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-12-16T17:58:37,770 INFO [PEWorker-2 {}] procedure.FlushTableProcedure(91): pid=126, 
state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=126, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-12-16T17:58:37,770 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=127, ppid=126, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-12-16T17:58:37,870 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38367 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=126 2024-12-16T17:58:37,921 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 3609ad07831c,39733,1734371789085 2024-12-16T17:58:37,921 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=39733 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=127 2024-12-16T17:58:37,921 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-2 {event_type=RS_FLUSH_REGIONS, pid=127}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1734371907068.f14d455ff3b60546f0a651dc8cf12d5c. 2024-12-16T17:58:37,921 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-2 {event_type=RS_FLUSH_REGIONS, pid=127}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1734371907068.f14d455ff3b60546f0a651dc8cf12d5c. as already flushing 2024-12-16T17:58:37,922 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-2 {event_type=RS_FLUSH_REGIONS, pid=127}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1734371907068.f14d455ff3b60546f0a651dc8cf12d5c. 2024-12-16T17:58:37,922 ERROR [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-2 {event_type=RS_FLUSH_REGIONS, pid=127}] handler.RSProcedureHandler(58): pid=127 java.io.IOException: Unable to complete flush {ENCODED => f14d455ff3b60546f0a651dc8cf12d5c, NAME => 'TestAcidGuarantees,,1734371907068.f14d455ff3b60546f0a651dc8cf12d5c.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-16T17:58:37,922 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-2 {event_type=RS_FLUSH_REGIONS, pid=127}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=127 java.io.IOException: Unable to complete flush {ENCODED => f14d455ff3b60546f0a651dc8cf12d5c, NAME => 'TestAcidGuarantees,,1734371907068.f14d455ff3b60546f0a651dc8cf12d5c.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-16T17:58:37,922 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38367 {}] master.HMaster(4114): Remote procedure failed, pid=127 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => f14d455ff3b60546f0a651dc8cf12d5c, NAME => 'TestAcidGuarantees,,1734371907068.f14d455ff3b60546f0a651dc8cf12d5c.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => f14d455ff3b60546f0a651dc8cf12d5c, NAME => 'TestAcidGuarantees,,1734371907068.f14d455ff3b60546f0a651dc8cf12d5c.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-16T17:58:37,926 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39733 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f14d455ff3b60546f0a651dc8cf12d5c, server=3609ad07831c,39733,1734371789085 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-16T17:58:37,926 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39733 {}] ipc.CallRunner(138): callId: 95 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35488 deadline: 1734371977923, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f14d455ff3b60546f0a651dc8cf12d5c, server=3609ad07831c,39733,1734371789085 2024-12-16T17:58:37,933 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39733 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f14d455ff3b60546f0a651dc8cf12d5c, server=3609ad07831c,39733,1734371789085 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-16T17:58:37,933 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39733 {}] ipc.CallRunner(138): callId: 101 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35424 deadline: 1734371977928, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f14d455ff3b60546f0a651dc8cf12d5c, server=3609ad07831c,39733,1734371789085 2024-12-16T17:58:37,933 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39733 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f14d455ff3b60546f0a651dc8cf12d5c, server=3609ad07831c,39733,1734371789085 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-16T17:58:37,933 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39733 {}] ipc.CallRunner(138): callId: 100 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35480 deadline: 1734371977928, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f14d455ff3b60546f0a651dc8cf12d5c, server=3609ad07831c,39733,1734371789085 2024-12-16T17:58:37,934 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39733 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f14d455ff3b60546f0a651dc8cf12d5c, server=3609ad07831c,39733,1734371789085 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-16T17:58:37,934 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39733 {}] ipc.CallRunner(138): callId: 98 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35482 deadline: 1734371977930, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f14d455ff3b60546f0a651dc8cf12d5c, server=3609ad07831c,39733,1734371789085 2024-12-16T17:58:38,071 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38367 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=126 2024-12-16T17:58:38,073 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 3609ad07831c,39733,1734371789085 2024-12-16T17:58:38,074 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=39733 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=127 2024-12-16T17:58:38,074 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-0 {event_type=RS_FLUSH_REGIONS, pid=127}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1734371907068.f14d455ff3b60546f0a651dc8cf12d5c. 2024-12-16T17:58:38,074 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-0 {event_type=RS_FLUSH_REGIONS, pid=127}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1734371907068.f14d455ff3b60546f0a651dc8cf12d5c. as already flushing 2024-12-16T17:58:38,074 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-0 {event_type=RS_FLUSH_REGIONS, pid=127}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1734371907068.f14d455ff3b60546f0a651dc8cf12d5c. 2024-12-16T17:58:38,074 ERROR [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-0 {event_type=RS_FLUSH_REGIONS, pid=127}] handler.RSProcedureHandler(58): pid=127 java.io.IOException: Unable to complete flush {ENCODED => f14d455ff3b60546f0a651dc8cf12d5c, NAME => 'TestAcidGuarantees,,1734371907068.f14d455ff3b60546f0a651dc8cf12d5c.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] 
at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-16T17:58:38,074 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-0 {event_type=RS_FLUSH_REGIONS, pid=127}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=127 java.io.IOException: Unable to complete flush {ENCODED => f14d455ff3b60546f0a651dc8cf12d5c, NAME => 'TestAcidGuarantees,,1734371907068.f14d455ff3b60546f0a651dc8cf12d5c.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-16T17:58:38,075 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38367 {}] master.HMaster(4114): Remote procedure failed, pid=127 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => f14d455ff3b60546f0a651dc8cf12d5c, NAME => 'TestAcidGuarantees,,1734371907068.f14d455ff3b60546f0a651dc8cf12d5c.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => f14d455ff3b60546f0a651dc8cf12d5c, NAME => 'TestAcidGuarantees,,1734371907068.f14d455ff3b60546f0a651dc8cf12d5c.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-16T17:58:38,126 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=58.14 KB at sequenceid=211 (bloomFilter=true), to=hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/f14d455ff3b60546f0a651dc8cf12d5c/.tmp/B/0b84b959c2df4d8d84caddd51a132cfc 2024-12-16T17:58:38,132 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/f14d455ff3b60546f0a651dc8cf12d5c/.tmp/C/75eebd318803401cb7fc12c9f9f59530 is 50, key is test_row_0/C:col10/1734371916993/Put/seqid=0 2024-12-16T17:58:38,137 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41817 is added to blk_1073742318_1494 (size=12151) 2024-12-16T17:58:38,226 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 3609ad07831c,39733,1734371789085 2024-12-16T17:58:38,226 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=39733 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=127 2024-12-16T17:58:38,226 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-1 {event_type=RS_FLUSH_REGIONS, pid=127}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1734371907068.f14d455ff3b60546f0a651dc8cf12d5c. 2024-12-16T17:58:38,226 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-1 {event_type=RS_FLUSH_REGIONS, pid=127}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1734371907068.f14d455ff3b60546f0a651dc8cf12d5c. as already flushing 2024-12-16T17:58:38,226 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-1 {event_type=RS_FLUSH_REGIONS, pid=127}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1734371907068.f14d455ff3b60546f0a651dc8cf12d5c. 2024-12-16T17:58:38,226 ERROR [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-1 {event_type=RS_FLUSH_REGIONS, pid=127}] handler.RSProcedureHandler(58): pid=127 java.io.IOException: Unable to complete flush {ENCODED => f14d455ff3b60546f0a651dc8cf12d5c, NAME => 'TestAcidGuarantees,,1734371907068.f14d455ff3b60546f0a651dc8cf12d5c.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-16T17:58:38,226 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-1 {event_type=RS_FLUSH_REGIONS, pid=127}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=127 java.io.IOException: Unable to complete flush {ENCODED => f14d455ff3b60546f0a651dc8cf12d5c, NAME => 'TestAcidGuarantees,,1734371907068.f14d455ff3b60546f0a651dc8cf12d5c.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-16T17:58:38,227 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38367 {}] master.HMaster(4114): Remote procedure failed, pid=127 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => f14d455ff3b60546f0a651dc8cf12d5c, NAME => 'TestAcidGuarantees,,1734371907068.f14d455ff3b60546f0a651dc8cf12d5c.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => f14d455ff3b60546f0a651dc8cf12d5c, NAME => 'TestAcidGuarantees,,1734371907068.f14d455ff3b60546f0a651dc8cf12d5c.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-16T17:58:38,371 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38367 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=126 2024-12-16T17:58:38,378 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 3609ad07831c,39733,1734371789085 2024-12-16T17:58:38,378 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=39733 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=127 2024-12-16T17:58:38,378 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-2 {event_type=RS_FLUSH_REGIONS, pid=127}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1734371907068.f14d455ff3b60546f0a651dc8cf12d5c. 2024-12-16T17:58:38,378 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-2 {event_type=RS_FLUSH_REGIONS, pid=127}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1734371907068.f14d455ff3b60546f0a651dc8cf12d5c. as already flushing 2024-12-16T17:58:38,378 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-2 {event_type=RS_FLUSH_REGIONS, pid=127}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1734371907068.f14d455ff3b60546f0a651dc8cf12d5c. 2024-12-16T17:58:38,378 ERROR [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-2 {event_type=RS_FLUSH_REGIONS, pid=127}] handler.RSProcedureHandler(58): pid=127 java.io.IOException: Unable to complete flush {ENCODED => f14d455ff3b60546f0a651dc8cf12d5c, NAME => 'TestAcidGuarantees,,1734371907068.f14d455ff3b60546f0a651dc8cf12d5c.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-16T17:58:38,379 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-2 {event_type=RS_FLUSH_REGIONS, pid=127}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=127 java.io.IOException: Unable to complete flush {ENCODED => f14d455ff3b60546f0a651dc8cf12d5c, NAME => 'TestAcidGuarantees,,1734371907068.f14d455ff3b60546f0a651dc8cf12d5c.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-16T17:58:38,379 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38367 {}] master.HMaster(4114): Remote procedure failed, pid=127 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => f14d455ff3b60546f0a651dc8cf12d5c, NAME => 'TestAcidGuarantees,,1734371907068.f14d455ff3b60546f0a651dc8cf12d5c.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => f14d455ff3b60546f0a651dc8cf12d5c, NAME => 'TestAcidGuarantees,,1734371907068.f14d455ff3b60546f0a651dc8cf12d5c.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-16T17:58:38,429 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39733 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f14d455ff3b60546f0a651dc8cf12d5c, server=3609ad07831c,39733,1734371789085 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-16T17:58:38,430 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39733 {}] ipc.CallRunner(138): callId: 97 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35488 deadline: 1734371978427, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f14d455ff3b60546f0a651dc8cf12d5c, server=3609ad07831c,39733,1734371789085 2024-12-16T17:58:38,437 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39733 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f14d455ff3b60546f0a651dc8cf12d5c, server=3609ad07831c,39733,1734371789085 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-16T17:58:38,437 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39733 {}] ipc.CallRunner(138): callId: 100 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35482 deadline: 1734371978435, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f14d455ff3b60546f0a651dc8cf12d5c, server=3609ad07831c,39733,1734371789085 2024-12-16T17:58:38,438 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39733 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f14d455ff3b60546f0a651dc8cf12d5c, server=3609ad07831c,39733,1734371789085 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-16T17:58:38,438 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39733 {}] ipc.CallRunner(138): callId: 103 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35424 deadline: 1734371978436, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f14d455ff3b60546f0a651dc8cf12d5c, server=3609ad07831c,39733,1734371789085 2024-12-16T17:58:38,441 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39733 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f14d455ff3b60546f0a651dc8cf12d5c, server=3609ad07831c,39733,1734371789085 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-16T17:58:38,441 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39733 {}] ipc.CallRunner(138): callId: 102 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35480 deadline: 1734371978438, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f14d455ff3b60546f0a651dc8cf12d5c, server=3609ad07831c,39733,1734371789085 2024-12-16T17:58:38,530 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 3609ad07831c,39733,1734371789085 2024-12-16T17:58:38,530 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=39733 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=127 2024-12-16T17:58:38,531 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-0 {event_type=RS_FLUSH_REGIONS, pid=127}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1734371907068.f14d455ff3b60546f0a651dc8cf12d5c. 2024-12-16T17:58:38,531 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-0 {event_type=RS_FLUSH_REGIONS, pid=127}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1734371907068.f14d455ff3b60546f0a651dc8cf12d5c. as already flushing 2024-12-16T17:58:38,531 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-0 {event_type=RS_FLUSH_REGIONS, pid=127}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1734371907068.f14d455ff3b60546f0a651dc8cf12d5c. 2024-12-16T17:58:38,531 ERROR [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-0 {event_type=RS_FLUSH_REGIONS, pid=127}] handler.RSProcedureHandler(58): pid=127 java.io.IOException: Unable to complete flush {ENCODED => f14d455ff3b60546f0a651dc8cf12d5c, NAME => 'TestAcidGuarantees,,1734371907068.f14d455ff3b60546f0a651dc8cf12d5c.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-16T17:58:38,531 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-0 {event_type=RS_FLUSH_REGIONS, pid=127}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=127 java.io.IOException: Unable to complete flush {ENCODED => f14d455ff3b60546f0a651dc8cf12d5c, NAME => 'TestAcidGuarantees,,1734371907068.f14d455ff3b60546f0a651dc8cf12d5c.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-16T17:58:38,531 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38367 {}] master.HMaster(4114): Remote procedure failed, pid=127 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => f14d455ff3b60546f0a651dc8cf12d5c, NAME => 'TestAcidGuarantees,,1734371907068.f14d455ff3b60546f0a651dc8cf12d5c.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => f14d455ff3b60546f0a651dc8cf12d5c, NAME => 'TestAcidGuarantees,,1734371907068.f14d455ff3b60546f0a651dc8cf12d5c.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-16T17:58:38,537 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=58.14 KB at sequenceid=211 (bloomFilter=true), to=hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/f14d455ff3b60546f0a651dc8cf12d5c/.tmp/C/75eebd318803401cb7fc12c9f9f59530 2024-12-16T17:58:38,540 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/f14d455ff3b60546f0a651dc8cf12d5c/.tmp/A/a28736933768401da0f3e79b87f68584 as hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/f14d455ff3b60546f0a651dc8cf12d5c/A/a28736933768401da0f3e79b87f68584 2024-12-16T17:58:38,543 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/f14d455ff3b60546f0a651dc8cf12d5c/A/a28736933768401da0f3e79b87f68584, entries=250, sequenceid=211, filesize=16.5 K 2024-12-16T17:58:38,544 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/f14d455ff3b60546f0a651dc8cf12d5c/.tmp/B/0b84b959c2df4d8d84caddd51a132cfc as hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/f14d455ff3b60546f0a651dc8cf12d5c/B/0b84b959c2df4d8d84caddd51a132cfc 2024-12-16T17:58:38,546 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/f14d455ff3b60546f0a651dc8cf12d5c/B/0b84b959c2df4d8d84caddd51a132cfc, entries=150, sequenceid=211, filesize=11.9 K 2024-12-16T17:58:38,547 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/f14d455ff3b60546f0a651dc8cf12d5c/.tmp/C/75eebd318803401cb7fc12c9f9f59530 as hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/f14d455ff3b60546f0a651dc8cf12d5c/C/75eebd318803401cb7fc12c9f9f59530 2024-12-16T17:58:38,550 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/f14d455ff3b60546f0a651dc8cf12d5c/C/75eebd318803401cb7fc12c9f9f59530, entries=150, sequenceid=211, filesize=11.9 K 2024-12-16T17:58:38,550 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~174.43 KB/178620, heapSize ~457.73 KB/468720, currentSize=26.84 KB/27480 for f14d455ff3b60546f0a651dc8cf12d5c in 1243ms, sequenceid=211, compaction requested=true 2024-12-16T17:58:38,550 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for f14d455ff3b60546f0a651dc8cf12d5c: 2024-12-16T17:58:38,551 DEBUG [RS:0;3609ad07831c:39733-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction 
from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-16T17:58:38,551 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store f14d455ff3b60546f0a651dc8cf12d5c:A, priority=-2147483648, current under compaction store size is 1 2024-12-16T17:58:38,551 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-16T17:58:38,551 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store f14d455ff3b60546f0a651dc8cf12d5c:B, priority=-2147483648, current under compaction store size is 2 2024-12-16T17:58:38,551 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-16T17:58:38,551 DEBUG [RS:0;3609ad07831c:39733-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-16T17:58:38,551 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store f14d455ff3b60546f0a651dc8cf12d5c:C, priority=-2147483648, current under compaction store size is 3 2024-12-16T17:58:38,551 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-16T17:58:38,551 DEBUG [RS:0;3609ad07831c:39733-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 41609 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-16T17:58:38,551 DEBUG [RS:0;3609ad07831c:39733-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 36829 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-16T17:58:38,551 DEBUG [RS:0;3609ad07831c:39733-shortCompactions-0 {}] regionserver.HStore(1540): f14d455ff3b60546f0a651dc8cf12d5c/A is initiating minor compaction (all files) 2024-12-16T17:58:38,551 DEBUG [RS:0;3609ad07831c:39733-longCompactions-0 {}] regionserver.HStore(1540): f14d455ff3b60546f0a651dc8cf12d5c/B is initiating minor compaction (all files) 2024-12-16T17:58:38,551 INFO [RS:0;3609ad07831c:39733-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of f14d455ff3b60546f0a651dc8cf12d5c/A in TestAcidGuarantees,,1734371907068.f14d455ff3b60546f0a651dc8cf12d5c. 2024-12-16T17:58:38,551 INFO [RS:0;3609ad07831c:39733-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of f14d455ff3b60546f0a651dc8cf12d5c/B in TestAcidGuarantees,,1734371907068.f14d455ff3b60546f0a651dc8cf12d5c. 
2024-12-16T17:58:38,552 INFO [RS:0;3609ad07831c:39733-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/f14d455ff3b60546f0a651dc8cf12d5c/B/48335edbd9514f6eb1f131d99c2dab23, hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/f14d455ff3b60546f0a651dc8cf12d5c/B/effeceb4c20f4119ba8e1b0e5760b134, hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/f14d455ff3b60546f0a651dc8cf12d5c/B/0b84b959c2df4d8d84caddd51a132cfc] into tmpdir=hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/f14d455ff3b60546f0a651dc8cf12d5c/.tmp, totalSize=36.0 K 2024-12-16T17:58:38,552 INFO [RS:0;3609ad07831c:39733-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/f14d455ff3b60546f0a651dc8cf12d5c/A/028c7d69a57a4729aed44044fbae59e7, hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/f14d455ff3b60546f0a651dc8cf12d5c/A/ca837e5e4a4b4588ab991384e8abd39e, hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/f14d455ff3b60546f0a651dc8cf12d5c/A/a28736933768401da0f3e79b87f68584] into tmpdir=hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/f14d455ff3b60546f0a651dc8cf12d5c/.tmp, totalSize=40.6 K 2024-12-16T17:58:38,552 DEBUG [RS:0;3609ad07831c:39733-longCompactions-0 {}] compactions.Compactor(224): Compacting 48335edbd9514f6eb1f131d99c2dab23, keycount=150, bloomtype=ROW, size=12.2 K, encoding=NONE, compression=NONE, seqNum=170, earliestPutTs=1734371915207 2024-12-16T17:58:38,552 DEBUG [RS:0;3609ad07831c:39733-shortCompactions-0 {}] compactions.Compactor(224): Compacting 028c7d69a57a4729aed44044fbae59e7, keycount=150, bloomtype=ROW, size=12.2 K, encoding=NONE, compression=NONE, seqNum=170, earliestPutTs=1734371915207 2024-12-16T17:58:38,552 DEBUG [RS:0;3609ad07831c:39733-longCompactions-0 {}] compactions.Compactor(224): Compacting effeceb4c20f4119ba8e1b0e5760b134, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=179, earliestPutTs=1734371916340 2024-12-16T17:58:38,552 DEBUG [RS:0;3609ad07831c:39733-shortCompactions-0 {}] compactions.Compactor(224): Compacting ca837e5e4a4b4588ab991384e8abd39e, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=179, earliestPutTs=1734371916340 2024-12-16T17:58:38,552 DEBUG [RS:0;3609ad07831c:39733-longCompactions-0 {}] compactions.Compactor(224): Compacting 0b84b959c2df4d8d84caddd51a132cfc, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=211, earliestPutTs=1734371916993 2024-12-16T17:58:38,552 DEBUG [RS:0;3609ad07831c:39733-shortCompactions-0 {}] compactions.Compactor(224): Compacting a28736933768401da0f3e79b87f68584, keycount=250, bloomtype=ROW, size=16.5 K, encoding=NONE, compression=NONE, seqNum=211, earliestPutTs=1734371916991 2024-12-16T17:58:38,557 INFO [RS:0;3609ad07831c:39733-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): f14d455ff3b60546f0a651dc8cf12d5c#A#compaction#420 average throughput is 6.55 MB/second, slept 0 time(s) and 
total slept time is 0 ms. 1 active operations remaining, total limit is 50.00 MB/second 2024-12-16T17:58:38,557 DEBUG [RS:0;3609ad07831c:39733-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/f14d455ff3b60546f0a651dc8cf12d5c/.tmp/A/61ad7bbb4f9644b49d9a61df979b1106 is 50, key is test_row_0/A:col10/1734371916993/Put/seqid=0 2024-12-16T17:58:38,557 INFO [RS:0;3609ad07831c:39733-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): f14d455ff3b60546f0a651dc8cf12d5c#B#compaction#421 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-12-16T17:58:38,558 DEBUG [RS:0;3609ad07831c:39733-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/f14d455ff3b60546f0a651dc8cf12d5c/.tmp/B/c9db8f2ff86e4e9c9f0b8f0639248c1c is 50, key is test_row_0/B:col10/1734371916993/Put/seqid=0 2024-12-16T17:58:38,563 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41817 is added to blk_1073742319_1495 (size=12629) 2024-12-16T17:58:38,563 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41817 is added to blk_1073742320_1496 (size=12629) 2024-12-16T17:58:38,568 DEBUG [RS:0;3609ad07831c:39733-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/f14d455ff3b60546f0a651dc8cf12d5c/.tmp/B/c9db8f2ff86e4e9c9f0b8f0639248c1c as hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/f14d455ff3b60546f0a651dc8cf12d5c/B/c9db8f2ff86e4e9c9f0b8f0639248c1c 2024-12-16T17:58:38,572 INFO [RS:0;3609ad07831c:39733-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in f14d455ff3b60546f0a651dc8cf12d5c/B of f14d455ff3b60546f0a651dc8cf12d5c into c9db8f2ff86e4e9c9f0b8f0639248c1c(size=12.3 K), total size for store is 12.3 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-12-16T17:58:38,572 DEBUG [RS:0;3609ad07831c:39733-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for f14d455ff3b60546f0a651dc8cf12d5c: 2024-12-16T17:58:38,572 INFO [RS:0;3609ad07831c:39733-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1734371907068.f14d455ff3b60546f0a651dc8cf12d5c., storeName=f14d455ff3b60546f0a651dc8cf12d5c/B, priority=13, startTime=1734371918551; duration=0sec 2024-12-16T17:58:38,572 DEBUG [RS:0;3609ad07831c:39733-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-16T17:58:38,572 DEBUG [RS:0;3609ad07831c:39733-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: f14d455ff3b60546f0a651dc8cf12d5c:B 2024-12-16T17:58:38,572 DEBUG [RS:0;3609ad07831c:39733-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-16T17:58:38,573 DEBUG [RS:0;3609ad07831c:39733-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 36829 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-16T17:58:38,573 DEBUG [RS:0;3609ad07831c:39733-longCompactions-0 {}] regionserver.HStore(1540): f14d455ff3b60546f0a651dc8cf12d5c/C is initiating minor compaction (all files) 2024-12-16T17:58:38,573 INFO [RS:0;3609ad07831c:39733-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of f14d455ff3b60546f0a651dc8cf12d5c/C in TestAcidGuarantees,,1734371907068.f14d455ff3b60546f0a651dc8cf12d5c. 2024-12-16T17:58:38,573 INFO [RS:0;3609ad07831c:39733-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/f14d455ff3b60546f0a651dc8cf12d5c/C/a0753d32d8484fdf9da23ceff39a024c, hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/f14d455ff3b60546f0a651dc8cf12d5c/C/80841d2885d44c1bab067a3bc285eb0a, hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/f14d455ff3b60546f0a651dc8cf12d5c/C/75eebd318803401cb7fc12c9f9f59530] into tmpdir=hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/f14d455ff3b60546f0a651dc8cf12d5c/.tmp, totalSize=36.0 K 2024-12-16T17:58:38,573 DEBUG [RS:0;3609ad07831c:39733-longCompactions-0 {}] compactions.Compactor(224): Compacting a0753d32d8484fdf9da23ceff39a024c, keycount=150, bloomtype=ROW, size=12.2 K, encoding=NONE, compression=NONE, seqNum=170, earliestPutTs=1734371915207 2024-12-16T17:58:38,573 DEBUG [RS:0;3609ad07831c:39733-longCompactions-0 {}] compactions.Compactor(224): Compacting 80841d2885d44c1bab067a3bc285eb0a, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=179, earliestPutTs=1734371916340 2024-12-16T17:58:38,573 DEBUG [RS:0;3609ad07831c:39733-longCompactions-0 {}] compactions.Compactor(224): Compacting 75eebd318803401cb7fc12c9f9f59530, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=211, earliestPutTs=1734371916993 2024-12-16T17:58:38,579 INFO [RS:0;3609ad07831c:39733-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 
f14d455ff3b60546f0a651dc8cf12d5c#C#compaction#422 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-12-16T17:58:38,579 DEBUG [RS:0;3609ad07831c:39733-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/f14d455ff3b60546f0a651dc8cf12d5c/.tmp/C/54a8673d4bd34099be50dbec95e6d58d is 50, key is test_row_0/C:col10/1734371916993/Put/seqid=0 2024-12-16T17:58:38,589 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41817 is added to blk_1073742321_1497 (size=12629) 2024-12-16T17:58:38,682 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 3609ad07831c,39733,1734371789085 2024-12-16T17:58:38,683 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=39733 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=127 2024-12-16T17:58:38,683 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-1 {event_type=RS_FLUSH_REGIONS, pid=127}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1734371907068.f14d455ff3b60546f0a651dc8cf12d5c. 2024-12-16T17:58:38,683 INFO [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-1 {event_type=RS_FLUSH_REGIONS, pid=127}] regionserver.HRegion(2837): Flushing f14d455ff3b60546f0a651dc8cf12d5c 3/3 column families, dataSize=26.84 KB heapSize=71.06 KB 2024-12-16T17:58:38,683 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-1 {event_type=RS_FLUSH_REGIONS, pid=127}] regionserver.CompactingMemStore(205): FLUSHING TO DISK f14d455ff3b60546f0a651dc8cf12d5c, store=A 2024-12-16T17:58:38,683 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-1 {event_type=RS_FLUSH_REGIONS, pid=127}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-16T17:58:38,683 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-1 {event_type=RS_FLUSH_REGIONS, pid=127}] regionserver.CompactingMemStore(205): FLUSHING TO DISK f14d455ff3b60546f0a651dc8cf12d5c, store=B 2024-12-16T17:58:38,683 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-1 {event_type=RS_FLUSH_REGIONS, pid=127}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-16T17:58:38,683 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-1 {event_type=RS_FLUSH_REGIONS, pid=127}] regionserver.CompactingMemStore(205): FLUSHING TO DISK f14d455ff3b60546f0a651dc8cf12d5c, store=C 2024-12-16T17:58:38,683 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-1 {event_type=RS_FLUSH_REGIONS, pid=127}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-16T17:58:38,687 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-1 {event_type=RS_FLUSH_REGIONS, pid=127}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/f14d455ff3b60546f0a651dc8cf12d5c/.tmp/A/7ebfa844a457496e93c5d18da5255bb1 is 50, key is test_row_1/A:col10/1734371917313/Put/seqid=0 2024-12-16T17:58:38,693 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41817 is added to blk_1073742322_1498 (size=9757) 2024-12-16T17:58:38,872 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38367 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=126 2024-12-16T17:58:38,967 DEBUG [RS:0;3609ad07831c:39733-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/f14d455ff3b60546f0a651dc8cf12d5c/.tmp/A/61ad7bbb4f9644b49d9a61df979b1106 as hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/f14d455ff3b60546f0a651dc8cf12d5c/A/61ad7bbb4f9644b49d9a61df979b1106 2024-12-16T17:58:38,975 INFO [RS:0;3609ad07831c:39733-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in f14d455ff3b60546f0a651dc8cf12d5c/A of f14d455ff3b60546f0a651dc8cf12d5c into 61ad7bbb4f9644b49d9a61df979b1106(size=12.3 K), total size for store is 12.3 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-12-16T17:58:38,975 DEBUG [RS:0;3609ad07831c:39733-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for f14d455ff3b60546f0a651dc8cf12d5c: 2024-12-16T17:58:38,975 INFO [RS:0;3609ad07831c:39733-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1734371907068.f14d455ff3b60546f0a651dc8cf12d5c., storeName=f14d455ff3b60546f0a651dc8cf12d5c/A, priority=13, startTime=1734371918550; duration=0sec 2024-12-16T17:58:38,975 DEBUG [RS:0;3609ad07831c:39733-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-16T17:58:38,975 DEBUG [RS:0;3609ad07831c:39733-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: f14d455ff3b60546f0a651dc8cf12d5c:A 2024-12-16T17:58:38,993 DEBUG [RS:0;3609ad07831c:39733-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/f14d455ff3b60546f0a651dc8cf12d5c/.tmp/C/54a8673d4bd34099be50dbec95e6d58d as hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/f14d455ff3b60546f0a651dc8cf12d5c/C/54a8673d4bd34099be50dbec95e6d58d 2024-12-16T17:58:38,997 INFO [RS:0;3609ad07831c:39733-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in f14d455ff3b60546f0a651dc8cf12d5c/C of f14d455ff3b60546f0a651dc8cf12d5c into 54a8673d4bd34099be50dbec95e6d58d(size=12.3 K), total size for store is 12.3 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-12-16T17:58:38,997 DEBUG [RS:0;3609ad07831c:39733-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for f14d455ff3b60546f0a651dc8cf12d5c: 2024-12-16T17:58:38,997 INFO [RS:0;3609ad07831c:39733-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1734371907068.f14d455ff3b60546f0a651dc8cf12d5c., storeName=f14d455ff3b60546f0a651dc8cf12d5c/C, priority=13, startTime=1734371918551; duration=0sec 2024-12-16T17:58:38,997 DEBUG [RS:0;3609ad07831c:39733-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-16T17:58:38,997 DEBUG [RS:0;3609ad07831c:39733-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: f14d455ff3b60546f0a651dc8cf12d5c:C 2024-12-16T17:58:39,093 INFO [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-1 {event_type=RS_FLUSH_REGIONS, pid=127}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=8.95 KB at sequenceid=219 (bloomFilter=true), to=hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/f14d455ff3b60546f0a651dc8cf12d5c/.tmp/A/7ebfa844a457496e93c5d18da5255bb1 2024-12-16T17:58:39,099 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-1 {event_type=RS_FLUSH_REGIONS, pid=127}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/f14d455ff3b60546f0a651dc8cf12d5c/.tmp/B/bdcd0f2d072e4ae68bf93acec1862e0e is 50, key is test_row_1/B:col10/1734371917313/Put/seqid=0 2024-12-16T17:58:39,103 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41817 is added to blk_1073742323_1499 (size=9757) 2024-12-16T17:58:39,103 INFO [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-1 {event_type=RS_FLUSH_REGIONS, pid=127}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=8.95 KB at sequenceid=219 (bloomFilter=true), to=hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/f14d455ff3b60546f0a651dc8cf12d5c/.tmp/B/bdcd0f2d072e4ae68bf93acec1862e0e 2024-12-16T17:58:39,109 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-1 {event_type=RS_FLUSH_REGIONS, pid=127}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/f14d455ff3b60546f0a651dc8cf12d5c/.tmp/C/94295c363c1b4631a5f7e068b440af1c is 50, key is test_row_1/C:col10/1734371917313/Put/seqid=0 2024-12-16T17:58:39,122 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41817 is added to blk_1073742324_1500 (size=9757) 2024-12-16T17:58:39,386 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39733 {}] regionserver.HRegion(8581): Flush requested on f14d455ff3b60546f0a651dc8cf12d5c 2024-12-16T17:58:39,386 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1734371907068.f14d455ff3b60546f0a651dc8cf12d5c. as already flushing 2024-12-16T17:58:39,445 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39733 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f14d455ff3b60546f0a651dc8cf12d5c, server=3609ad07831c,39733,1734371789085 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-16T17:58:39,445 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39733 {}] ipc.CallRunner(138): callId: 100 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35488 deadline: 1734371979442, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f14d455ff3b60546f0a651dc8cf12d5c, server=3609ad07831c,39733,1734371789085 2024-12-16T17:58:39,449 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39733 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f14d455ff3b60546f0a651dc8cf12d5c, server=3609ad07831c,39733,1734371789085 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-16T17:58:39,449 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39733 {}] ipc.CallRunner(138): callId: 103 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35482 deadline: 1734371979443, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f14d455ff3b60546f0a651dc8cf12d5c, server=3609ad07831c,39733,1734371789085 2024-12-16T17:58:39,449 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39733 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f14d455ff3b60546f0a651dc8cf12d5c, server=3609ad07831c,39733,1734371789085 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-16T17:58:39,449 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39733 {}] ipc.CallRunner(138): callId: 106 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35424 deadline: 1734371979444, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f14d455ff3b60546f0a651dc8cf12d5c, server=3609ad07831c,39733,1734371789085 2024-12-16T17:58:39,449 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39733 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f14d455ff3b60546f0a651dc8cf12d5c, server=3609ad07831c,39733,1734371789085 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-16T17:58:39,449 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39733 {}] ipc.CallRunner(138): callId: 99 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35486 deadline: 1734371979445, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f14d455ff3b60546f0a651dc8cf12d5c, server=3609ad07831c,39733,1734371789085 2024-12-16T17:58:39,449 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39733 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f14d455ff3b60546f0a651dc8cf12d5c, server=3609ad07831c,39733,1734371789085 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-16T17:58:39,449 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39733 {}] ipc.CallRunner(138): callId: 104 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35480 deadline: 1734371979446, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f14d455ff3b60546f0a651dc8cf12d5c, server=3609ad07831c,39733,1734371789085 2024-12-16T17:58:39,522 INFO [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-1 {event_type=RS_FLUSH_REGIONS, pid=127}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=8.95 KB at sequenceid=219 (bloomFilter=true), to=hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/f14d455ff3b60546f0a651dc8cf12d5c/.tmp/C/94295c363c1b4631a5f7e068b440af1c 2024-12-16T17:58:39,526 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-1 {event_type=RS_FLUSH_REGIONS, pid=127}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/f14d455ff3b60546f0a651dc8cf12d5c/.tmp/A/7ebfa844a457496e93c5d18da5255bb1 as hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/f14d455ff3b60546f0a651dc8cf12d5c/A/7ebfa844a457496e93c5d18da5255bb1 2024-12-16T17:58:39,529 INFO [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-1 {event_type=RS_FLUSH_REGIONS, pid=127}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/f14d455ff3b60546f0a651dc8cf12d5c/A/7ebfa844a457496e93c5d18da5255bb1, entries=100, sequenceid=219, filesize=9.5 K 2024-12-16T17:58:39,530 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-1 {event_type=RS_FLUSH_REGIONS, pid=127}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/f14d455ff3b60546f0a651dc8cf12d5c/.tmp/B/bdcd0f2d072e4ae68bf93acec1862e0e as hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/f14d455ff3b60546f0a651dc8cf12d5c/B/bdcd0f2d072e4ae68bf93acec1862e0e 2024-12-16T17:58:39,533 INFO [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-1 {event_type=RS_FLUSH_REGIONS, pid=127}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/f14d455ff3b60546f0a651dc8cf12d5c/B/bdcd0f2d072e4ae68bf93acec1862e0e, entries=100, sequenceid=219, filesize=9.5 K 2024-12-16T17:58:39,533 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-1 {event_type=RS_FLUSH_REGIONS, pid=127}] regionserver.HRegionFileSystem(442): Committing 
hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/f14d455ff3b60546f0a651dc8cf12d5c/.tmp/C/94295c363c1b4631a5f7e068b440af1c as hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/f14d455ff3b60546f0a651dc8cf12d5c/C/94295c363c1b4631a5f7e068b440af1c 2024-12-16T17:58:39,536 INFO [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-1 {event_type=RS_FLUSH_REGIONS, pid=127}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/f14d455ff3b60546f0a651dc8cf12d5c/C/94295c363c1b4631a5f7e068b440af1c, entries=100, sequenceid=219, filesize=9.5 K 2024-12-16T17:58:39,537 INFO [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-1 {event_type=RS_FLUSH_REGIONS, pid=127}] regionserver.HRegion(3040): Finished flush of dataSize ~26.84 KB/27480, heapSize ~71.02 KB/72720, currentSize=174.43 KB/178620 for f14d455ff3b60546f0a651dc8cf12d5c in 854ms, sequenceid=219, compaction requested=false 2024-12-16T17:58:39,537 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-1 {event_type=RS_FLUSH_REGIONS, pid=127}] regionserver.HRegion(2538): Flush status journal for f14d455ff3b60546f0a651dc8cf12d5c: 2024-12-16T17:58:39,537 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-1 {event_type=RS_FLUSH_REGIONS, pid=127}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1734371907068.f14d455ff3b60546f0a651dc8cf12d5c. 2024-12-16T17:58:39,537 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-1 {event_type=RS_FLUSH_REGIONS, pid=127}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=127 2024-12-16T17:58:39,537 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38367 {}] master.HMaster(4106): Remote procedure done, pid=127 2024-12-16T17:58:39,539 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=127, resume processing ppid=126 2024-12-16T17:58:39,539 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=127, ppid=126, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 1.7680 sec 2024-12-16T17:58:39,540 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=126, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=126, table=TestAcidGuarantees in 1.7710 sec 2024-12-16T17:58:39,548 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39733 {}] regionserver.HRegion(8581): Flush requested on f14d455ff3b60546f0a651dc8cf12d5c 2024-12-16T17:58:39,548 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing f14d455ff3b60546f0a651dc8cf12d5c 3/3 column families, dataSize=181.14 KB heapSize=475.36 KB 2024-12-16T17:58:39,549 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK f14d455ff3b60546f0a651dc8cf12d5c, store=A 2024-12-16T17:58:39,549 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-16T17:58:39,549 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK f14d455ff3b60546f0a651dc8cf12d5c, store=B 2024-12-16T17:58:39,549 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-16T17:58:39,549 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 
f14d455ff3b60546f0a651dc8cf12d5c, store=C 2024-12-16T17:58:39,549 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-16T17:58:39,553 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/f14d455ff3b60546f0a651dc8cf12d5c/.tmp/A/84cd7e81f98943a780d44590ffd2ff26 is 50, key is test_row_0/A:col10/1734371919442/Put/seqid=0 2024-12-16T17:58:39,557 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39733 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f14d455ff3b60546f0a651dc8cf12d5c, server=3609ad07831c,39733,1734371789085 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-16T17:58:39,557 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39733 {}] ipc.CallRunner(138): callId: 104 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35488 deadline: 1734371979553, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f14d455ff3b60546f0a651dc8cf12d5c, server=3609ad07831c,39733,1734371789085 2024-12-16T17:58:39,558 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39733 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f14d455ff3b60546f0a651dc8cf12d5c, server=3609ad07831c,39733,1734371789085 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-16T17:58:39,558 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39733 {}] ipc.CallRunner(138): callId: 106 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35482 deadline: 1734371979554, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f14d455ff3b60546f0a651dc8cf12d5c, server=3609ad07831c,39733,1734371789085 2024-12-16T17:58:39,558 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39733 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f14d455ff3b60546f0a651dc8cf12d5c, server=3609ad07831c,39733,1734371789085 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-16T17:58:39,558 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39733 {}] ipc.CallRunner(138): callId: 109 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35424 deadline: 1734371979554, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f14d455ff3b60546f0a651dc8cf12d5c, server=3609ad07831c,39733,1734371789085 2024-12-16T17:58:39,558 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41817 is added to blk_1073742325_1501 (size=14541) 2024-12-16T17:58:39,561 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=60.38 KB at sequenceid=251 (bloomFilter=true), to=hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/f14d455ff3b60546f0a651dc8cf12d5c/.tmp/A/84cd7e81f98943a780d44590ffd2ff26 2024-12-16T17:58:39,563 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39733 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f14d455ff3b60546f0a651dc8cf12d5c, server=3609ad07831c,39733,1734371789085 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-16T17:58:39,563 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39733 {}] ipc.CallRunner(138): callId: 102 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35486 deadline: 1734371979557, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f14d455ff3b60546f0a651dc8cf12d5c, server=3609ad07831c,39733,1734371789085 2024-12-16T17:58:39,567 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/f14d455ff3b60546f0a651dc8cf12d5c/.tmp/B/050c0b721ab7460e9930532f93e6429f is 50, key is test_row_0/B:col10/1734371919442/Put/seqid=0 2024-12-16T17:58:39,573 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41817 is added to blk_1073742326_1502 (size=12151) 2024-12-16T17:58:39,660 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39733 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f14d455ff3b60546f0a651dc8cf12d5c, server=3609ad07831c,39733,1734371789085 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-16T17:58:39,661 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39733 {}] ipc.CallRunner(138): callId: 106 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35488 deadline: 1734371979658, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f14d455ff3b60546f0a651dc8cf12d5c, server=3609ad07831c,39733,1734371789085 2024-12-16T17:58:39,662 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39733 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f14d455ff3b60546f0a651dc8cf12d5c, server=3609ad07831c,39733,1734371789085 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-16T17:58:39,662 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39733 {}] ipc.CallRunner(138): callId: 108 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35482 deadline: 1734371979660, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f14d455ff3b60546f0a651dc8cf12d5c, server=3609ad07831c,39733,1734371789085 2024-12-16T17:58:39,662 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39733 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f14d455ff3b60546f0a651dc8cf12d5c, server=3609ad07831c,39733,1734371789085 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-16T17:58:39,663 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39733 {}] ipc.CallRunner(138): callId: 111 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35424 deadline: 1734371979660, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f14d455ff3b60546f0a651dc8cf12d5c, server=3609ad07831c,39733,1734371789085 2024-12-16T17:58:39,666 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39733 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f14d455ff3b60546f0a651dc8cf12d5c, server=3609ad07831c,39733,1734371789085 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-16T17:58:39,667 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39733 {}] ipc.CallRunner(138): callId: 104 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35486 deadline: 1734371979664, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f14d455ff3b60546f0a651dc8cf12d5c, server=3609ad07831c,39733,1734371789085 2024-12-16T17:58:39,864 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39733 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f14d455ff3b60546f0a651dc8cf12d5c, server=3609ad07831c,39733,1734371789085 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-16T17:58:39,864 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39733 {}] ipc.CallRunner(138): callId: 108 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35488 deadline: 1734371979861, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f14d455ff3b60546f0a651dc8cf12d5c, server=3609ad07831c,39733,1734371789085 2024-12-16T17:58:39,865 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39733 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f14d455ff3b60546f0a651dc8cf12d5c, server=3609ad07831c,39733,1734371789085 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-16T17:58:39,865 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39733 {}] ipc.CallRunner(138): callId: 113 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35424 deadline: 1734371979863, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f14d455ff3b60546f0a651dc8cf12d5c, server=3609ad07831c,39733,1734371789085 2024-12-16T17:58:39,866 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39733 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f14d455ff3b60546f0a651dc8cf12d5c, server=3609ad07831c,39733,1734371789085 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-16T17:58:39,866 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39733 {}] ipc.CallRunner(138): callId: 110 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35482 deadline: 1734371979864, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f14d455ff3b60546f0a651dc8cf12d5c, server=3609ad07831c,39733,1734371789085 2024-12-16T17:58:39,870 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39733 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f14d455ff3b60546f0a651dc8cf12d5c, server=3609ad07831c,39733,1734371789085 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-16T17:58:39,870 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39733 {}] ipc.CallRunner(138): callId: 106 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35486 deadline: 1734371979868, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f14d455ff3b60546f0a651dc8cf12d5c, server=3609ad07831c,39733,1734371789085 2024-12-16T17:58:39,872 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38367 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=126 2024-12-16T17:58:39,873 INFO [Thread-2023 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 126 completed 2024-12-16T17:58:39,873 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38367 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-12-16T17:58:39,874 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38367 {}] procedure2.ProcedureExecutor(1098): Stored pid=128, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=128, table=TestAcidGuarantees 2024-12-16T17:58:39,874 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38367 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=128 2024-12-16T17:58:39,874 INFO [PEWorker-4 {}] procedure.FlushTableProcedure(91): pid=128, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=128, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-12-16T17:58:39,875 INFO [PEWorker-4 {}] procedure.FlushTableProcedure(91): pid=128, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=128, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-12-16T17:58:39,875 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=129, ppid=128, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-12-16T17:58:39,974 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=60.38 KB at sequenceid=251 (bloomFilter=true), to=hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/f14d455ff3b60546f0a651dc8cf12d5c/.tmp/B/050c0b721ab7460e9930532f93e6429f 2024-12-16T17:58:39,975 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38367 {}] master.MasterRpcServices(1305): Checking to 
see if procedure is done pid=128 2024-12-16T17:58:39,979 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/f14d455ff3b60546f0a651dc8cf12d5c/.tmp/C/1f6978e9ef0a48058b453379d6fc4b1b is 50, key is test_row_0/C:col10/1734371919442/Put/seqid=0 2024-12-16T17:58:39,982 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41817 is added to blk_1073742327_1503 (size=12151) 2024-12-16T17:58:40,026 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 3609ad07831c,39733,1734371789085 2024-12-16T17:58:40,026 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=39733 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=129 2024-12-16T17:58:40,026 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-2 {event_type=RS_FLUSH_REGIONS, pid=129}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1734371907068.f14d455ff3b60546f0a651dc8cf12d5c. 2024-12-16T17:58:40,026 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-2 {event_type=RS_FLUSH_REGIONS, pid=129}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1734371907068.f14d455ff3b60546f0a651dc8cf12d5c. as already flushing 2024-12-16T17:58:40,026 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-2 {event_type=RS_FLUSH_REGIONS, pid=129}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1734371907068.f14d455ff3b60546f0a651dc8cf12d5c. 2024-12-16T17:58:40,026 ERROR [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-2 {event_type=RS_FLUSH_REGIONS, pid=129}] handler.RSProcedureHandler(58): pid=129 java.io.IOException: Unable to complete flush {ENCODED => f14d455ff3b60546f0a651dc8cf12d5c, NAME => 'TestAcidGuarantees,,1734371907068.f14d455ff3b60546f0a651dc8cf12d5c.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-16T17:58:40,027 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-2 {event_type=RS_FLUSH_REGIONS, pid=129}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=129 java.io.IOException: Unable to complete flush {ENCODED => f14d455ff3b60546f0a651dc8cf12d5c, NAME => 'TestAcidGuarantees,,1734371907068.f14d455ff3b60546f0a651dc8cf12d5c.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-16T17:58:40,027 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38367 {}] master.HMaster(4114): Remote procedure failed, pid=129 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => f14d455ff3b60546f0a651dc8cf12d5c, NAME => 'TestAcidGuarantees,,1734371907068.f14d455ff3b60546f0a651dc8cf12d5c.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => f14d455ff3b60546f0a651dc8cf12d5c, NAME => 'TestAcidGuarantees,,1734371907068.f14d455ff3b60546f0a651dc8cf12d5c.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-16T17:58:40,168 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39733 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f14d455ff3b60546f0a651dc8cf12d5c, server=3609ad07831c,39733,1734371789085 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-16T17:58:40,168 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39733 {}] ipc.CallRunner(138): callId: 110 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35488 deadline: 1734371980166, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f14d455ff3b60546f0a651dc8cf12d5c, server=3609ad07831c,39733,1734371789085 2024-12-16T17:58:40,171 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39733 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f14d455ff3b60546f0a651dc8cf12d5c, server=3609ad07831c,39733,1734371789085 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-16T17:58:40,171 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39733 {}] ipc.CallRunner(138): callId: 115 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35424 deadline: 1734371980168, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f14d455ff3b60546f0a651dc8cf12d5c, server=3609ad07831c,39733,1734371789085 2024-12-16T17:58:40,171 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39733 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f14d455ff3b60546f0a651dc8cf12d5c, server=3609ad07831c,39733,1734371789085 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-16T17:58:40,171 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39733 {}] ipc.CallRunner(138): callId: 112 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35482 deadline: 1734371980168, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f14d455ff3b60546f0a651dc8cf12d5c, server=3609ad07831c,39733,1734371789085 2024-12-16T17:58:40,175 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39733 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f14d455ff3b60546f0a651dc8cf12d5c, server=3609ad07831c,39733,1734371789085 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-16T17:58:40,175 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39733 {}] ipc.CallRunner(138): callId: 108 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35486 deadline: 1734371980172, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f14d455ff3b60546f0a651dc8cf12d5c, server=3609ad07831c,39733,1734371789085 2024-12-16T17:58:40,176 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38367 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=128 2024-12-16T17:58:40,178 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 3609ad07831c,39733,1734371789085 2024-12-16T17:58:40,178 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=39733 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=129 2024-12-16T17:58:40,179 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-0 {event_type=RS_FLUSH_REGIONS, pid=129}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1734371907068.f14d455ff3b60546f0a651dc8cf12d5c. 2024-12-16T17:58:40,179 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-0 {event_type=RS_FLUSH_REGIONS, pid=129}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1734371907068.f14d455ff3b60546f0a651dc8cf12d5c. as already flushing 2024-12-16T17:58:40,179 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-0 {event_type=RS_FLUSH_REGIONS, pid=129}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1734371907068.f14d455ff3b60546f0a651dc8cf12d5c. 2024-12-16T17:58:40,179 ERROR [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-0 {event_type=RS_FLUSH_REGIONS, pid=129}] handler.RSProcedureHandler(58): pid=129 java.io.IOException: Unable to complete flush {ENCODED => f14d455ff3b60546f0a651dc8cf12d5c, NAME => 'TestAcidGuarantees,,1734371907068.f14d455ff3b60546f0a651dc8cf12d5c.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] 
at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-16T17:58:40,179 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-0 {event_type=RS_FLUSH_REGIONS, pid=129}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=129 java.io.IOException: Unable to complete flush {ENCODED => f14d455ff3b60546f0a651dc8cf12d5c, NAME => 'TestAcidGuarantees,,1734371907068.f14d455ff3b60546f0a651dc8cf12d5c.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-16T17:58:40,179 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38367 {}] master.HMaster(4114): Remote procedure failed, pid=129 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => f14d455ff3b60546f0a651dc8cf12d5c, NAME => 'TestAcidGuarantees,,1734371907068.f14d455ff3b60546f0a651dc8cf12d5c.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => f14d455ff3b60546f0a651dc8cf12d5c, NAME => 'TestAcidGuarantees,,1734371907068.f14d455ff3b60546f0a651dc8cf12d5c.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-16T17:58:40,330 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 3609ad07831c,39733,1734371789085 2024-12-16T17:58:40,331 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=39733 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=129 2024-12-16T17:58:40,331 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-1 {event_type=RS_FLUSH_REGIONS, pid=129}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1734371907068.f14d455ff3b60546f0a651dc8cf12d5c. 2024-12-16T17:58:40,331 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-1 {event_type=RS_FLUSH_REGIONS, pid=129}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1734371907068.f14d455ff3b60546f0a651dc8cf12d5c. as already flushing 2024-12-16T17:58:40,331 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-1 {event_type=RS_FLUSH_REGIONS, pid=129}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1734371907068.f14d455ff3b60546f0a651dc8cf12d5c. 2024-12-16T17:58:40,331 ERROR [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-1 {event_type=RS_FLUSH_REGIONS, pid=129}] handler.RSProcedureHandler(58): pid=129 java.io.IOException: Unable to complete flush {ENCODED => f14d455ff3b60546f0a651dc8cf12d5c, NAME => 'TestAcidGuarantees,,1734371907068.f14d455ff3b60546f0a651dc8cf12d5c.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-16T17:58:40,331 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-1 {event_type=RS_FLUSH_REGIONS, pid=129}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=129 java.io.IOException: Unable to complete flush {ENCODED => f14d455ff3b60546f0a651dc8cf12d5c, NAME => 'TestAcidGuarantees,,1734371907068.f14d455ff3b60546f0a651dc8cf12d5c.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-16T17:58:40,332 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38367 {}] master.HMaster(4114): Remote procedure failed, pid=129 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => f14d455ff3b60546f0a651dc8cf12d5c, NAME => 'TestAcidGuarantees,,1734371907068.f14d455ff3b60546f0a651dc8cf12d5c.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => f14d455ff3b60546f0a651dc8cf12d5c, NAME => 'TestAcidGuarantees,,1734371907068.f14d455ff3b60546f0a651dc8cf12d5c.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-16T17:58:40,382 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=60.38 KB at sequenceid=251 (bloomFilter=true), to=hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/f14d455ff3b60546f0a651dc8cf12d5c/.tmp/C/1f6978e9ef0a48058b453379d6fc4b1b 2024-12-16T17:58:40,386 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/f14d455ff3b60546f0a651dc8cf12d5c/.tmp/A/84cd7e81f98943a780d44590ffd2ff26 as hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/f14d455ff3b60546f0a651dc8cf12d5c/A/84cd7e81f98943a780d44590ffd2ff26 2024-12-16T17:58:40,388 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/f14d455ff3b60546f0a651dc8cf12d5c/A/84cd7e81f98943a780d44590ffd2ff26, entries=200, sequenceid=251, filesize=14.2 K 2024-12-16T17:58:40,389 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/f14d455ff3b60546f0a651dc8cf12d5c/.tmp/B/050c0b721ab7460e9930532f93e6429f as hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/f14d455ff3b60546f0a651dc8cf12d5c/B/050c0b721ab7460e9930532f93e6429f 2024-12-16T17:58:40,391 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/f14d455ff3b60546f0a651dc8cf12d5c/B/050c0b721ab7460e9930532f93e6429f, entries=150, sequenceid=251, filesize=11.9 K 2024-12-16T17:58:40,392 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/f14d455ff3b60546f0a651dc8cf12d5c/.tmp/C/1f6978e9ef0a48058b453379d6fc4b1b as hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/f14d455ff3b60546f0a651dc8cf12d5c/C/1f6978e9ef0a48058b453379d6fc4b1b 2024-12-16T17:58:40,395 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/f14d455ff3b60546f0a651dc8cf12d5c/C/1f6978e9ef0a48058b453379d6fc4b1b, entries=150, sequenceid=251, filesize=11.9 K 2024-12-16T17:58:40,395 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~181.14 KB/185490, heapSize ~475.31 KB/486720, currentSize=26.84 KB/27480 for f14d455ff3b60546f0a651dc8cf12d5c in 847ms, sequenceid=251, compaction requested=true 2024-12-16T17:58:40,395 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for f14d455ff3b60546f0a651dc8cf12d5c: 2024-12-16T17:58:40,396 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store f14d455ff3b60546f0a651dc8cf12d5c:A, priority=-2147483648, current under compaction store size is 1 2024-12-16T17:58:40,396 DEBUG [RS:0;3609ad07831c:39733-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 
compacting, 3 eligible, 16 blocking 2024-12-16T17:58:40,396 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-16T17:58:40,396 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store f14d455ff3b60546f0a651dc8cf12d5c:B, priority=-2147483648, current under compaction store size is 2 2024-12-16T17:58:40,396 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-16T17:58:40,396 DEBUG [RS:0;3609ad07831c:39733-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-16T17:58:40,396 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store f14d455ff3b60546f0a651dc8cf12d5c:C, priority=-2147483648, current under compaction store size is 3 2024-12-16T17:58:40,396 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-16T17:58:40,397 DEBUG [RS:0;3609ad07831c:39733-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 36927 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-16T17:58:40,397 DEBUG [RS:0;3609ad07831c:39733-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 34537 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-16T17:58:40,397 DEBUG [RS:0;3609ad07831c:39733-longCompactions-0 {}] regionserver.HStore(1540): f14d455ff3b60546f0a651dc8cf12d5c/B is initiating minor compaction (all files) 2024-12-16T17:58:40,397 DEBUG [RS:0;3609ad07831c:39733-shortCompactions-0 {}] regionserver.HStore(1540): f14d455ff3b60546f0a651dc8cf12d5c/A is initiating minor compaction (all files) 2024-12-16T17:58:40,397 INFO [RS:0;3609ad07831c:39733-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of f14d455ff3b60546f0a651dc8cf12d5c/A in TestAcidGuarantees,,1734371907068.f14d455ff3b60546f0a651dc8cf12d5c. 2024-12-16T17:58:40,397 INFO [RS:0;3609ad07831c:39733-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of f14d455ff3b60546f0a651dc8cf12d5c/B in TestAcidGuarantees,,1734371907068.f14d455ff3b60546f0a651dc8cf12d5c. 
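The two ExploringCompactionPolicy entries above report that all three eligible store files were selected after a single permutation passed the ratio test. A minimal sketch of that style of ratio check, assuming the default hbase.hstore.compaction.ratio of 1.2; the class and the byte sizes below are illustrative, not HBase's actual implementation:

import java.util.List;

// Illustrative check in the spirit of ExploringCompactionPolicy#filesInRatio:
// a candidate set passes if no file is larger than ratio * (sum of the other files).
public final class RatioCheckSketch {
  static boolean filesInRatio(List<Long> fileSizes, double ratio) {
    if (fileSizes.size() < 2) {
      return true;
    }
    long total = fileSizes.stream().mapToLong(Long::longValue).sum();
    for (long size : fileSizes) {
      if (size > (total - size) * ratio) {
        return false;
      }
    }
    return true;
  }

  public static void main(String[] args) {
    // Approximate byte sizes of the three A-store files logged above (12.3 K, 9.5 K, 14.2 K).
    List<Long> sizes = List.of(12_595L, 9_728L, 14_541L);
    System.out.println(filesInRatio(sizes, 1.2)); // true: all three files compact together
  }
}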
2024-12-16T17:58:40,397 INFO [RS:0;3609ad07831c:39733-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/f14d455ff3b60546f0a651dc8cf12d5c/A/61ad7bbb4f9644b49d9a61df979b1106, hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/f14d455ff3b60546f0a651dc8cf12d5c/A/7ebfa844a457496e93c5d18da5255bb1, hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/f14d455ff3b60546f0a651dc8cf12d5c/A/84cd7e81f98943a780d44590ffd2ff26] into tmpdir=hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/f14d455ff3b60546f0a651dc8cf12d5c/.tmp, totalSize=36.1 K 2024-12-16T17:58:40,397 INFO [RS:0;3609ad07831c:39733-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/f14d455ff3b60546f0a651dc8cf12d5c/B/c9db8f2ff86e4e9c9f0b8f0639248c1c, hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/f14d455ff3b60546f0a651dc8cf12d5c/B/bdcd0f2d072e4ae68bf93acec1862e0e, hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/f14d455ff3b60546f0a651dc8cf12d5c/B/050c0b721ab7460e9930532f93e6429f] into tmpdir=hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/f14d455ff3b60546f0a651dc8cf12d5c/.tmp, totalSize=33.7 K 2024-12-16T17:58:40,397 DEBUG [RS:0;3609ad07831c:39733-shortCompactions-0 {}] compactions.Compactor(224): Compacting 61ad7bbb4f9644b49d9a61df979b1106, keycount=150, bloomtype=ROW, size=12.3 K, encoding=NONE, compression=NONE, seqNum=211, earliestPutTs=1734371916993 2024-12-16T17:58:40,397 DEBUG [RS:0;3609ad07831c:39733-longCompactions-0 {}] compactions.Compactor(224): Compacting c9db8f2ff86e4e9c9f0b8f0639248c1c, keycount=150, bloomtype=ROW, size=12.3 K, encoding=NONE, compression=NONE, seqNum=211, earliestPutTs=1734371916993 2024-12-16T17:58:40,397 DEBUG [RS:0;3609ad07831c:39733-longCompactions-0 {}] compactions.Compactor(224): Compacting bdcd0f2d072e4ae68bf93acec1862e0e, keycount=100, bloomtype=ROW, size=9.5 K, encoding=NONE, compression=NONE, seqNum=219, earliestPutTs=1734371917313 2024-12-16T17:58:40,397 DEBUG [RS:0;3609ad07831c:39733-shortCompactions-0 {}] compactions.Compactor(224): Compacting 7ebfa844a457496e93c5d18da5255bb1, keycount=100, bloomtype=ROW, size=9.5 K, encoding=NONE, compression=NONE, seqNum=219, earliestPutTs=1734371917313 2024-12-16T17:58:40,397 DEBUG [RS:0;3609ad07831c:39733-longCompactions-0 {}] compactions.Compactor(224): Compacting 050c0b721ab7460e9930532f93e6429f, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=251, earliestPutTs=1734371919442 2024-12-16T17:58:40,397 DEBUG [RS:0;3609ad07831c:39733-shortCompactions-0 {}] compactions.Compactor(224): Compacting 84cd7e81f98943a780d44590ffd2ff26, keycount=200, bloomtype=ROW, size=14.2 K, encoding=NONE, compression=NONE, seqNum=251, earliestPutTs=1734371919442 2024-12-16T17:58:40,403 INFO [RS:0;3609ad07831c:39733-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): f14d455ff3b60546f0a651dc8cf12d5c#B#compaction#429 average throughput is 6.55 MB/second, slept 0 time(s) and total 
slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-12-16T17:58:40,403 DEBUG [RS:0;3609ad07831c:39733-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/f14d455ff3b60546f0a651dc8cf12d5c/.tmp/B/37e903c14b124434975c24d40787af4f is 50, key is test_row_0/B:col10/1734371919442/Put/seqid=0 2024-12-16T17:58:40,406 INFO [RS:0;3609ad07831c:39733-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): f14d455ff3b60546f0a651dc8cf12d5c#A#compaction#430 average throughput is unlimited, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-12-16T17:58:40,406 DEBUG [RS:0;3609ad07831c:39733-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/f14d455ff3b60546f0a651dc8cf12d5c/.tmp/A/047ce9ef1c95414a8cdaeedddd577c9d is 50, key is test_row_0/A:col10/1734371919442/Put/seqid=0 2024-12-16T17:58:40,408 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41817 is added to blk_1073742328_1504 (size=12731) 2024-12-16T17:58:40,415 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41817 is added to blk_1073742329_1505 (size=12731) 2024-12-16T17:58:40,476 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38367 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=128 2024-12-16T17:58:40,483 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 3609ad07831c,39733,1734371789085 2024-12-16T17:58:40,483 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=39733 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=129 2024-12-16T17:58:40,483 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-2 {event_type=RS_FLUSH_REGIONS, pid=129}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1734371907068.f14d455ff3b60546f0a651dc8cf12d5c. 
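The PressureAwareThroughputController entries above meter compaction output against a 50.00 MB/second limit and report that no sleeps were needed. A minimal sketch of that kind of fixed-rate throttling, with assumed names and a constant budget rather than the controller's pressure-adjusted one:

// Illustrative fixed-rate throttle: sleep whenever bytes written get ahead of what
// the configured limit allows for the elapsed wall-clock time.
public final class SimpleThroughputThrottle {
  private final double bytesPerSecond;
  private final long startNanos = System.nanoTime();
  private long bytesWritten;

  public SimpleThroughputThrottle(double bytesPerSecond) {
    this.bytesPerSecond = bytesPerSecond;
  }

  /** Call after writing a chunk; sleeps only if the average rate would exceed the limit. */
  public synchronized void control(long chunkBytes) throws InterruptedException {
    bytesWritten += chunkBytes;
    double elapsedSec = (System.nanoTime() - startNanos) / 1e9;
    double earliestAllowedSec = bytesWritten / bytesPerSecond;
    long sleepMs = (long) ((earliestAllowedSec - elapsedSec) * 1000);
    if (sleepMs > 0) {
      Thread.sleep(sleepMs); // the compactions above stayed under the limit, hence "slept 0 time(s)"
    }
  }
}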
2024-12-16T17:58:40,483 INFO [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-2 {event_type=RS_FLUSH_REGIONS, pid=129}] regionserver.HRegion(2837): Flushing f14d455ff3b60546f0a651dc8cf12d5c 3/3 column families, dataSize=26.84 KB heapSize=71.06 KB 2024-12-16T17:58:40,483 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-2 {event_type=RS_FLUSH_REGIONS, pid=129}] regionserver.CompactingMemStore(205): FLUSHING TO DISK f14d455ff3b60546f0a651dc8cf12d5c, store=A 2024-12-16T17:58:40,483 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-2 {event_type=RS_FLUSH_REGIONS, pid=129}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-16T17:58:40,484 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-2 {event_type=RS_FLUSH_REGIONS, pid=129}] regionserver.CompactingMemStore(205): FLUSHING TO DISK f14d455ff3b60546f0a651dc8cf12d5c, store=B 2024-12-16T17:58:40,484 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-2 {event_type=RS_FLUSH_REGIONS, pid=129}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-16T17:58:40,484 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-2 {event_type=RS_FLUSH_REGIONS, pid=129}] regionserver.CompactingMemStore(205): FLUSHING TO DISK f14d455ff3b60546f0a651dc8cf12d5c, store=C 2024-12-16T17:58:40,484 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-2 {event_type=RS_FLUSH_REGIONS, pid=129}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-16T17:58:40,487 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-2 {event_type=RS_FLUSH_REGIONS, pid=129}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/f14d455ff3b60546f0a651dc8cf12d5c/.tmp/A/1090afae0d8146f2928c666616ca2cf8 is 50, key is test_row_0/A:col10/1734371919553/Put/seqid=0 2024-12-16T17:58:40,490 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41817 is added to blk_1073742330_1506 (size=12201) 2024-12-16T17:58:40,680 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39733 {}] regionserver.HRegion(8581): Flush requested on f14d455ff3b60546f0a651dc8cf12d5c 2024-12-16T17:58:40,680 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1734371907068.f14d455ff3b60546f0a651dc8cf12d5c. as already flushing 2024-12-16T17:58:40,709 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39733 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f14d455ff3b60546f0a651dc8cf12d5c, server=3609ad07831c,39733,1734371789085 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-16T17:58:40,710 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39733 {}] ipc.CallRunner(138): callId: 115 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35486 deadline: 1734371980705, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f14d455ff3b60546f0a651dc8cf12d5c, server=3609ad07831c,39733,1734371789085 2024-12-16T17:58:40,710 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39733 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f14d455ff3b60546f0a651dc8cf12d5c, server=3609ad07831c,39733,1734371789085 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-16T17:58:40,710 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39733 {}] ipc.CallRunner(138): callId: 119 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35488 deadline: 1734371980705, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f14d455ff3b60546f0a651dc8cf12d5c, server=3609ad07831c,39733,1734371789085 2024-12-16T17:58:40,710 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39733 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f14d455ff3b60546f0a651dc8cf12d5c, server=3609ad07831c,39733,1734371789085 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-16T17:58:40,710 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39733 {}] ipc.CallRunner(138): callId: 123 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35424 deadline: 1734371980706, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f14d455ff3b60546f0a651dc8cf12d5c, server=3609ad07831c,39733,1734371789085 2024-12-16T17:58:40,714 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39733 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f14d455ff3b60546f0a651dc8cf12d5c, server=3609ad07831c,39733,1734371789085 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-16T17:58:40,714 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39733 {}] ipc.CallRunner(138): callId: 122 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35482 deadline: 1734371980710, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f14d455ff3b60546f0a651dc8cf12d5c, server=3609ad07831c,39733,1734371789085 2024-12-16T17:58:40,812 DEBUG [RS:0;3609ad07831c:39733-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/f14d455ff3b60546f0a651dc8cf12d5c/.tmp/B/37e903c14b124434975c24d40787af4f as hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/f14d455ff3b60546f0a651dc8cf12d5c/B/37e903c14b124434975c24d40787af4f 2024-12-16T17:58:40,813 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39733 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f14d455ff3b60546f0a651dc8cf12d5c, server=3609ad07831c,39733,1734371789085 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-16T17:58:40,813 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39733 {}] ipc.CallRunner(138): callId: 117 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35486 deadline: 1734371980810, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f14d455ff3b60546f0a651dc8cf12d5c, server=3609ad07831c,39733,1734371789085 2024-12-16T17:58:40,813 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39733 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f14d455ff3b60546f0a651dc8cf12d5c, server=3609ad07831c,39733,1734371789085 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-16T17:58:40,813 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39733 {}] ipc.CallRunner(138): callId: 121 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35488 deadline: 1734371980810, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f14d455ff3b60546f0a651dc8cf12d5c, server=3609ad07831c,39733,1734371789085 2024-12-16T17:58:40,813 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39733 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f14d455ff3b60546f0a651dc8cf12d5c, server=3609ad07831c,39733,1734371789085 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-16T17:58:40,813 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39733 {}] ipc.CallRunner(138): callId: 125 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35424 deadline: 1734371980811, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f14d455ff3b60546f0a651dc8cf12d5c, server=3609ad07831c,39733,1734371789085 2024-12-16T17:58:40,815 INFO [RS:0;3609ad07831c:39733-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in f14d455ff3b60546f0a651dc8cf12d5c/B of f14d455ff3b60546f0a651dc8cf12d5c into 37e903c14b124434975c24d40787af4f(size=12.4 K), total size for store is 12.4 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-12-16T17:58:40,815 DEBUG [RS:0;3609ad07831c:39733-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for f14d455ff3b60546f0a651dc8cf12d5c: 2024-12-16T17:58:40,815 INFO [RS:0;3609ad07831c:39733-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1734371907068.f14d455ff3b60546f0a651dc8cf12d5c., storeName=f14d455ff3b60546f0a651dc8cf12d5c/B, priority=13, startTime=1734371920396; duration=0sec 2024-12-16T17:58:40,815 DEBUG [RS:0;3609ad07831c:39733-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-16T17:58:40,815 DEBUG [RS:0;3609ad07831c:39733-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: f14d455ff3b60546f0a651dc8cf12d5c:B 2024-12-16T17:58:40,816 DEBUG [RS:0;3609ad07831c:39733-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-16T17:58:40,816 DEBUG [RS:0;3609ad07831c:39733-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 34537 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-16T17:58:40,816 DEBUG [RS:0;3609ad07831c:39733-longCompactions-0 {}] regionserver.HStore(1540): f14d455ff3b60546f0a651dc8cf12d5c/C is initiating minor compaction (all files) 2024-12-16T17:58:40,816 INFO [RS:0;3609ad07831c:39733-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of f14d455ff3b60546f0a651dc8cf12d5c/C in TestAcidGuarantees,,1734371907068.f14d455ff3b60546f0a651dc8cf12d5c. 
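The repeated RegionTooBusyException warnings above are the region server rejecting writes while the region sits over its 512 K blocking memstore limit; callers are expected to back off and retry once the in-flight flush drains the memstore. A minimal client-side sketch of that handling with the standard HBase client API (connection setup, retry count, and backoff values are assumptions, not taken from the test):

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.RegionTooBusyException;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.util.Bytes;

public final class BusyRegionRetrySketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    try (Connection conn = ConnectionFactory.createConnection(conf);
         Table table = conn.getTable(TableName.valueOf("TestAcidGuarantees"))) {
      Put put = new Put(Bytes.toBytes("test_row_0"));
      put.addColumn(Bytes.toBytes("A"), Bytes.toBytes("col10"), Bytes.toBytes("value"));

      long backoffMs = 100; // assumed starting backoff; the HBase client also retries internally
      for (int attempt = 1; attempt <= 5; attempt++) {
        try {
          table.put(put);
          break; // write accepted
        } catch (RegionTooBusyException e) {
          // Region is over its memstore limit, as in the log above; wait for the flush to catch up.
          Thread.sleep(backoffMs);
          backoffMs *= 2;
        }
      }
    }
  }
}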
2024-12-16T17:58:40,816 INFO [RS:0;3609ad07831c:39733-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/f14d455ff3b60546f0a651dc8cf12d5c/C/54a8673d4bd34099be50dbec95e6d58d, hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/f14d455ff3b60546f0a651dc8cf12d5c/C/94295c363c1b4631a5f7e068b440af1c, hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/f14d455ff3b60546f0a651dc8cf12d5c/C/1f6978e9ef0a48058b453379d6fc4b1b] into tmpdir=hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/f14d455ff3b60546f0a651dc8cf12d5c/.tmp, totalSize=33.7 K 2024-12-16T17:58:40,817 DEBUG [RS:0;3609ad07831c:39733-longCompactions-0 {}] compactions.Compactor(224): Compacting 54a8673d4bd34099be50dbec95e6d58d, keycount=150, bloomtype=ROW, size=12.3 K, encoding=NONE, compression=NONE, seqNum=211, earliestPutTs=1734371916993 2024-12-16T17:58:40,817 DEBUG [RS:0;3609ad07831c:39733-longCompactions-0 {}] compactions.Compactor(224): Compacting 94295c363c1b4631a5f7e068b440af1c, keycount=100, bloomtype=ROW, size=9.5 K, encoding=NONE, compression=NONE, seqNum=219, earliestPutTs=1734371917313 2024-12-16T17:58:40,817 DEBUG [RS:0;3609ad07831c:39733-longCompactions-0 {}] compactions.Compactor(224): Compacting 1f6978e9ef0a48058b453379d6fc4b1b, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=251, earliestPutTs=1734371919442 2024-12-16T17:58:40,818 DEBUG [RS:0;3609ad07831c:39733-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/f14d455ff3b60546f0a651dc8cf12d5c/.tmp/A/047ce9ef1c95414a8cdaeedddd577c9d as hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/f14d455ff3b60546f0a651dc8cf12d5c/A/047ce9ef1c95414a8cdaeedddd577c9d 2024-12-16T17:58:40,820 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39733 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f14d455ff3b60546f0a651dc8cf12d5c, server=3609ad07831c,39733,1734371789085 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-16T17:58:40,820 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39733 {}] ipc.CallRunner(138): callId: 124 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35482 deadline: 1734371980815, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f14d455ff3b60546f0a651dc8cf12d5c, server=3609ad07831c,39733,1734371789085 2024-12-16T17:58:40,822 INFO [RS:0;3609ad07831c:39733-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in f14d455ff3b60546f0a651dc8cf12d5c/A of f14d455ff3b60546f0a651dc8cf12d5c into 047ce9ef1c95414a8cdaeedddd577c9d(size=12.4 K), total size for store is 12.4 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-12-16T17:58:40,822 DEBUG [RS:0;3609ad07831c:39733-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for f14d455ff3b60546f0a651dc8cf12d5c: 2024-12-16T17:58:40,822 INFO [RS:0;3609ad07831c:39733-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1734371907068.f14d455ff3b60546f0a651dc8cf12d5c., storeName=f14d455ff3b60546f0a651dc8cf12d5c/A, priority=13, startTime=1734371920396; duration=0sec 2024-12-16T17:58:40,822 DEBUG [RS:0;3609ad07831c:39733-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-16T17:58:40,822 DEBUG [RS:0;3609ad07831c:39733-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: f14d455ff3b60546f0a651dc8cf12d5c:A 2024-12-16T17:58:40,822 INFO [RS:0;3609ad07831c:39733-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): f14d455ff3b60546f0a651dc8cf12d5c#C#compaction#432 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-12-16T17:58:40,822 DEBUG [RS:0;3609ad07831c:39733-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/f14d455ff3b60546f0a651dc8cf12d5c/.tmp/C/c169b94adbf34c72b5f51bebc0c0c450 is 50, key is test_row_0/C:col10/1734371919442/Put/seqid=0 2024-12-16T17:58:40,825 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41817 is added to blk_1073742331_1507 (size=12731) 2024-12-16T17:58:40,891 INFO [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-2 {event_type=RS_FLUSH_REGIONS, pid=129}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=8.95 KB at sequenceid=258 (bloomFilter=true), to=hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/f14d455ff3b60546f0a651dc8cf12d5c/.tmp/A/1090afae0d8146f2928c666616ca2cf8 2024-12-16T17:58:40,896 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-2 {event_type=RS_FLUSH_REGIONS, pid=129}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/f14d455ff3b60546f0a651dc8cf12d5c/.tmp/B/1d618a3004ab43ef9ca320235325eec4 is 50, key is test_row_0/B:col10/1734371919553/Put/seqid=0 2024-12-16T17:58:40,899 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41817 is added to blk_1073742332_1508 (size=12201) 2024-12-16T17:58:40,903 INFO [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-2 {event_type=RS_FLUSH_REGIONS, pid=129}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=8.95 KB at sequenceid=258 (bloomFilter=true), to=hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/f14d455ff3b60546f0a651dc8cf12d5c/.tmp/B/1d618a3004ab43ef9ca320235325eec4 2024-12-16T17:58:40,908 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-2 {event_type=RS_FLUSH_REGIONS, pid=129}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/f14d455ff3b60546f0a651dc8cf12d5c/.tmp/C/3933beb632474f059457e6c6943c0c52 is 50, key is test_row_0/C:col10/1734371919553/Put/seqid=0 2024-12-16T17:58:40,910 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41817 is added to blk_1073742333_1509 (size=12201) 2024-12-16T17:58:40,911 INFO [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-2 {event_type=RS_FLUSH_REGIONS, pid=129}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=8.95 KB at sequenceid=258 (bloomFilter=true), to=hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/f14d455ff3b60546f0a651dc8cf12d5c/.tmp/C/3933beb632474f059457e6c6943c0c52 2024-12-16T17:58:40,914 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-2 {event_type=RS_FLUSH_REGIONS, pid=129}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/f14d455ff3b60546f0a651dc8cf12d5c/.tmp/A/1090afae0d8146f2928c666616ca2cf8 as 
hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/f14d455ff3b60546f0a651dc8cf12d5c/A/1090afae0d8146f2928c666616ca2cf8 2024-12-16T17:58:40,916 INFO [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-2 {event_type=RS_FLUSH_REGIONS, pid=129}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/f14d455ff3b60546f0a651dc8cf12d5c/A/1090afae0d8146f2928c666616ca2cf8, entries=150, sequenceid=258, filesize=11.9 K 2024-12-16T17:58:40,916 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-2 {event_type=RS_FLUSH_REGIONS, pid=129}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/f14d455ff3b60546f0a651dc8cf12d5c/.tmp/B/1d618a3004ab43ef9ca320235325eec4 as hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/f14d455ff3b60546f0a651dc8cf12d5c/B/1d618a3004ab43ef9ca320235325eec4 2024-12-16T17:58:40,918 INFO [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-2 {event_type=RS_FLUSH_REGIONS, pid=129}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/f14d455ff3b60546f0a651dc8cf12d5c/B/1d618a3004ab43ef9ca320235325eec4, entries=150, sequenceid=258, filesize=11.9 K 2024-12-16T17:58:40,919 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-2 {event_type=RS_FLUSH_REGIONS, pid=129}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/f14d455ff3b60546f0a651dc8cf12d5c/.tmp/C/3933beb632474f059457e6c6943c0c52 as hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/f14d455ff3b60546f0a651dc8cf12d5c/C/3933beb632474f059457e6c6943c0c52 2024-12-16T17:58:40,921 INFO [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-2 {event_type=RS_FLUSH_REGIONS, pid=129}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/f14d455ff3b60546f0a651dc8cf12d5c/C/3933beb632474f059457e6c6943c0c52, entries=150, sequenceid=258, filesize=11.9 K 2024-12-16T17:58:40,922 INFO [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-2 {event_type=RS_FLUSH_REGIONS, pid=129}] regionserver.HRegion(3040): Finished flush of dataSize ~26.84 KB/27480, heapSize ~71.02 KB/72720, currentSize=174.43 KB/178620 for f14d455ff3b60546f0a651dc8cf12d5c in 439ms, sequenceid=258, compaction requested=false 2024-12-16T17:58:40,922 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-2 {event_type=RS_FLUSH_REGIONS, pid=129}] regionserver.HRegion(2538): Flush status journal for f14d455ff3b60546f0a651dc8cf12d5c: 2024-12-16T17:58:40,922 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-2 {event_type=RS_FLUSH_REGIONS, pid=129}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1734371907068.f14d455ff3b60546f0a651dc8cf12d5c. 
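The flush committed just above (sequenceid=258) was executed by a remote FlushRegionProcedure on behalf of a client flush of the table, as the procedure-completion entries that follow show. From the client side this is a single Admin call; a minimal sketch, with configuration and error handling assumed and the table name taken from the log:

import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

public final class FlushTableSketch {
  public static void main(String[] args) throws Exception {
    try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create());
         Admin admin = conn.getAdmin()) {
      // Returns once the master reports the flush procedure done, matching the
      // "Operation: FLUSH ... procId ... completed" polling seen in the log.
      admin.flush(TableName.valueOf("TestAcidGuarantees"));
    }
  }
}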
2024-12-16T17:58:40,922 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-2 {event_type=RS_FLUSH_REGIONS, pid=129}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=129 2024-12-16T17:58:40,922 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38367 {}] master.HMaster(4106): Remote procedure done, pid=129 2024-12-16T17:58:40,923 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=129, resume processing ppid=128 2024-12-16T17:58:40,923 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=129, ppid=128, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 1.0480 sec 2024-12-16T17:58:40,925 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=128, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=128, table=TestAcidGuarantees in 1.0500 sec 2024-12-16T17:58:40,977 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38367 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=128 2024-12-16T17:58:40,977 INFO [Thread-2023 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 128 completed 2024-12-16T17:58:40,978 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38367 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-12-16T17:58:40,979 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38367 {}] procedure2.ProcedureExecutor(1098): Stored pid=130, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=130, table=TestAcidGuarantees 2024-12-16T17:58:40,979 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38367 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=130 2024-12-16T17:58:40,979 INFO [PEWorker-5 {}] procedure.FlushTableProcedure(91): pid=130, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=130, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-12-16T17:58:40,980 INFO [PEWorker-5 {}] procedure.FlushTableProcedure(91): pid=130, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=130, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-12-16T17:58:40,980 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=131, ppid=130, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-12-16T17:58:41,017 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39733 {}] regionserver.HRegion(8581): Flush requested on f14d455ff3b60546f0a651dc8cf12d5c 2024-12-16T17:58:41,017 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing f14d455ff3b60546f0a651dc8cf12d5c 3/3 column families, dataSize=181.14 KB heapSize=475.36 KB 2024-12-16T17:58:41,018 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK f14d455ff3b60546f0a651dc8cf12d5c, store=A 2024-12-16T17:58:41,018 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-16T17:58:41,018 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK f14d455ff3b60546f0a651dc8cf12d5c, store=B 2024-12-16T17:58:41,018 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new 
segment=null 2024-12-16T17:58:41,018 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK f14d455ff3b60546f0a651dc8cf12d5c, store=C 2024-12-16T17:58:41,018 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-16T17:58:41,022 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/f14d455ff3b60546f0a651dc8cf12d5c/.tmp/A/e5c4b657bcb940f29410114a3a9770ff is 50, key is test_row_0/A:col10/1734371920698/Put/seqid=0 2024-12-16T17:58:41,023 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39733 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f14d455ff3b60546f0a651dc8cf12d5c, server=3609ad07831c,39733,1734371789085 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-16T17:58:41,024 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39733 {}] ipc.CallRunner(138): callId: 126 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35482 deadline: 1734371981022, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f14d455ff3b60546f0a651dc8cf12d5c, server=3609ad07831c,39733,1734371789085 2024-12-16T17:58:41,025 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41817 is added to blk_1073742334_1510 (size=17181) 2024-12-16T17:58:41,026 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39733 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f14d455ff3b60546f0a651dc8cf12d5c, server=3609ad07831c,39733,1734371789085 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-16T17:58:41,026 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39733 {}] ipc.CallRunner(138): callId: 121 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35486 deadline: 1734371981023, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f14d455ff3b60546f0a651dc8cf12d5c, server=3609ad07831c,39733,1734371789085 2024-12-16T17:58:41,029 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39733 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f14d455ff3b60546f0a651dc8cf12d5c, server=3609ad07831c,39733,1734371789085 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-16T17:58:41,030 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39733 {}] ipc.CallRunner(138): callId: 125 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35488 deadline: 1734371981024, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f14d455ff3b60546f0a651dc8cf12d5c, server=3609ad07831c,39733,1734371789085 2024-12-16T17:58:41,030 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39733 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f14d455ff3b60546f0a651dc8cf12d5c, server=3609ad07831c,39733,1734371789085 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-16T17:58:41,030 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39733 {}] ipc.CallRunner(138): callId: 129 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35424 deadline: 1734371981024, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f14d455ff3b60546f0a651dc8cf12d5c, server=3609ad07831c,39733,1734371789085 2024-12-16T17:58:41,080 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38367 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=130 2024-12-16T17:58:41,131 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 3609ad07831c,39733,1734371789085 2024-12-16T17:58:41,131 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=39733 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=131 2024-12-16T17:58:41,131 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-0 {event_type=RS_FLUSH_REGIONS, pid=131}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1734371907068.f14d455ff3b60546f0a651dc8cf12d5c. 2024-12-16T17:58:41,131 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-0 {event_type=RS_FLUSH_REGIONS, pid=131}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1734371907068.f14d455ff3b60546f0a651dc8cf12d5c. as already flushing 2024-12-16T17:58:41,131 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-0 {event_type=RS_FLUSH_REGIONS, pid=131}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1734371907068.f14d455ff3b60546f0a651dc8cf12d5c. 2024-12-16T17:58:41,131 ERROR [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-0 {event_type=RS_FLUSH_REGIONS, pid=131}] handler.RSProcedureHandler(58): pid=131 java.io.IOException: Unable to complete flush {ENCODED => f14d455ff3b60546f0a651dc8cf12d5c, NAME => 'TestAcidGuarantees,,1734371907068.f14d455ff3b60546f0a651dc8cf12d5c.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-16T17:58:41,132 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39733 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f14d455ff3b60546f0a651dc8cf12d5c, server=3609ad07831c,39733,1734371789085 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-16T17:58:41,132 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-0 {event_type=RS_FLUSH_REGIONS, pid=131}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=131 java.io.IOException: Unable to complete flush {ENCODED => f14d455ff3b60546f0a651dc8cf12d5c, NAME => 'TestAcidGuarantees,,1734371907068.f14d455ff3b60546f0a651dc8cf12d5c.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-16T17:58:41,132 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39733 {}] ipc.CallRunner(138): callId: 123 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35486 deadline: 1734371981127, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f14d455ff3b60546f0a651dc8cf12d5c, server=3609ad07831c,39733,1734371789085 2024-12-16T17:58:41,132 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38367 {}] master.HMaster(4114): Remote procedure failed, pid=131 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => f14d455ff3b60546f0a651dc8cf12d5c, NAME => 'TestAcidGuarantees,,1734371907068.f14d455ff3b60546f0a651dc8cf12d5c.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => f14d455ff3b60546f0a651dc8cf12d5c, NAME => 'TestAcidGuarantees,,1734371907068.f14d455ff3b60546f0a651dc8cf12d5c.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-16T17:58:41,135 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39733 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f14d455ff3b60546f0a651dc8cf12d5c, server=3609ad07831c,39733,1734371789085 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-16T17:58:41,135 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39733 {}] ipc.CallRunner(138): callId: 131 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35424 deadline: 1734371981130, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f14d455ff3b60546f0a651dc8cf12d5c, server=3609ad07831c,39733,1734371789085 2024-12-16T17:58:41,135 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39733 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f14d455ff3b60546f0a651dc8cf12d5c, server=3609ad07831c,39733,1734371789085 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-16T17:58:41,135 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39733 {}] ipc.CallRunner(138): callId: 127 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35488 deadline: 1734371981130, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f14d455ff3b60546f0a651dc8cf12d5c, server=3609ad07831c,39733,1734371789085 2024-12-16T17:58:41,231 DEBUG [RS:0;3609ad07831c:39733-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/f14d455ff3b60546f0a651dc8cf12d5c/.tmp/C/c169b94adbf34c72b5f51bebc0c0c450 as hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/f14d455ff3b60546f0a651dc8cf12d5c/C/c169b94adbf34c72b5f51bebc0c0c450 2024-12-16T17:58:41,235 INFO [RS:0;3609ad07831c:39733-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in f14d455ff3b60546f0a651dc8cf12d5c/C of f14d455ff3b60546f0a651dc8cf12d5c into c169b94adbf34c72b5f51bebc0c0c450(size=12.4 K), total size for store is 24.3 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-12-16T17:58:41,235 DEBUG [RS:0;3609ad07831c:39733-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for f14d455ff3b60546f0a651dc8cf12d5c: 2024-12-16T17:58:41,235 INFO [RS:0;3609ad07831c:39733-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1734371907068.f14d455ff3b60546f0a651dc8cf12d5c., storeName=f14d455ff3b60546f0a651dc8cf12d5c/C, priority=13, startTime=1734371920396; duration=0sec 2024-12-16T17:58:41,235 DEBUG [RS:0;3609ad07831c:39733-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-16T17:58:41,235 DEBUG [RS:0;3609ad07831c:39733-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: f14d455ff3b60546f0a651dc8cf12d5c:C 2024-12-16T17:58:41,280 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38367 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=130 2024-12-16T17:58:41,283 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 3609ad07831c,39733,1734371789085 2024-12-16T17:58:41,284 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=39733 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=131 2024-12-16T17:58:41,284 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-1 {event_type=RS_FLUSH_REGIONS, pid=131}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1734371907068.f14d455ff3b60546f0a651dc8cf12d5c. 
2024-12-16T17:58:41,284 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-1 {event_type=RS_FLUSH_REGIONS, pid=131}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1734371907068.f14d455ff3b60546f0a651dc8cf12d5c. as already flushing 2024-12-16T17:58:41,284 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-1 {event_type=RS_FLUSH_REGIONS, pid=131}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1734371907068.f14d455ff3b60546f0a651dc8cf12d5c. 2024-12-16T17:58:41,284 ERROR [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-1 {event_type=RS_FLUSH_REGIONS, pid=131}] handler.RSProcedureHandler(58): pid=131 java.io.IOException: Unable to complete flush {ENCODED => f14d455ff3b60546f0a651dc8cf12d5c, NAME => 'TestAcidGuarantees,,1734371907068.f14d455ff3b60546f0a651dc8cf12d5c.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-16T17:58:41,284 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-1 {event_type=RS_FLUSH_REGIONS, pid=131}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=131 java.io.IOException: Unable to complete flush {ENCODED => f14d455ff3b60546f0a651dc8cf12d5c, NAME => 'TestAcidGuarantees,,1734371907068.f14d455ff3b60546f0a651dc8cf12d5c.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-16T17:58:41,285 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38367 {}] master.HMaster(4114): Remote procedure failed, pid=131 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => f14d455ff3b60546f0a651dc8cf12d5c, NAME => 'TestAcidGuarantees,,1734371907068.f14d455ff3b60546f0a651dc8cf12d5c.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => f14d455ff3b60546f0a651dc8cf12d5c, NAME => 'TestAcidGuarantees,,1734371907068.f14d455ff3b60546f0a651dc8cf12d5c.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-16T17:58:41,327 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39733 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f14d455ff3b60546f0a651dc8cf12d5c, server=3609ad07831c,39733,1734371789085 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-16T17:58:41,327 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39733 {}] ipc.CallRunner(138): callId: 128 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35482 deadline: 1734371981326, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f14d455ff3b60546f0a651dc8cf12d5c, server=3609ad07831c,39733,1734371789085 2024-12-16T17:58:41,336 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39733 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f14d455ff3b60546f0a651dc8cf12d5c, server=3609ad07831c,39733,1734371789085 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-16T17:58:41,336 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39733 {}] ipc.CallRunner(138): callId: 125 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35486 deadline: 1734371981333, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f14d455ff3b60546f0a651dc8cf12d5c, server=3609ad07831c,39733,1734371789085 2024-12-16T17:58:41,340 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39733 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f14d455ff3b60546f0a651dc8cf12d5c, server=3609ad07831c,39733,1734371789085 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-16T17:58:41,340 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39733 {}] ipc.CallRunner(138): callId: 133 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35424 deadline: 1734371981337, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f14d455ff3b60546f0a651dc8cf12d5c, server=3609ad07831c,39733,1734371789085 2024-12-16T17:58:41,340 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39733 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f14d455ff3b60546f0a651dc8cf12d5c, server=3609ad07831c,39733,1734371789085 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-16T17:58:41,341 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39733 {}] ipc.CallRunner(138): callId: 129 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35488 deadline: 1734371981337, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f14d455ff3b60546f0a651dc8cf12d5c, server=3609ad07831c,39733,1734371789085 2024-12-16T17:58:41,425 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=62.62 KB at sequenceid=291 (bloomFilter=true), to=hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/f14d455ff3b60546f0a651dc8cf12d5c/.tmp/A/e5c4b657bcb940f29410114a3a9770ff 2024-12-16T17:58:41,431 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/f14d455ff3b60546f0a651dc8cf12d5c/.tmp/B/bc6feb47c7874f978b7613d59e4ac684 is 50, key is test_row_0/B:col10/1734371920698/Put/seqid=0 2024-12-16T17:58:41,435 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41817 is added to blk_1073742335_1511 (size=12301) 2024-12-16T17:58:41,435 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=62.62 KB at sequenceid=291 (bloomFilter=true), to=hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/f14d455ff3b60546f0a651dc8cf12d5c/.tmp/B/bc6feb47c7874f978b7613d59e4ac684 2024-12-16T17:58:41,436 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 3609ad07831c,39733,1734371789085 2024-12-16T17:58:41,436 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=39733 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=131 2024-12-16T17:58:41,436 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-2 {event_type=RS_FLUSH_REGIONS, pid=131}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1734371907068.f14d455ff3b60546f0a651dc8cf12d5c. 2024-12-16T17:58:41,436 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-2 {event_type=RS_FLUSH_REGIONS, pid=131}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1734371907068.f14d455ff3b60546f0a651dc8cf12d5c. as already flushing 2024-12-16T17:58:41,436 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-2 {event_type=RS_FLUSH_REGIONS, pid=131}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1734371907068.f14d455ff3b60546f0a651dc8cf12d5c. 
2024-12-16T17:58:41,436 ERROR [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-2 {event_type=RS_FLUSH_REGIONS, pid=131}] handler.RSProcedureHandler(58): pid=131 java.io.IOException: Unable to complete flush {ENCODED => f14d455ff3b60546f0a651dc8cf12d5c, NAME => 'TestAcidGuarantees,,1734371907068.f14d455ff3b60546f0a651dc8cf12d5c.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-16T17:58:41,436 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-2 {event_type=RS_FLUSH_REGIONS, pid=131}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=131 java.io.IOException: Unable to complete flush {ENCODED => f14d455ff3b60546f0a651dc8cf12d5c, NAME => 'TestAcidGuarantees,,1734371907068.f14d455ff3b60546f0a651dc8cf12d5c.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-16T17:58:41,437 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38367 {}] master.HMaster(4114): Remote procedure failed, pid=131 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => f14d455ff3b60546f0a651dc8cf12d5c, NAME => 'TestAcidGuarantees,,1734371907068.f14d455ff3b60546f0a651dc8cf12d5c.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => f14d455ff3b60546f0a651dc8cf12d5c, NAME => 'TestAcidGuarantees,,1734371907068.f14d455ff3b60546f0a651dc8cf12d5c.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-16T17:58:41,440 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/f14d455ff3b60546f0a651dc8cf12d5c/.tmp/C/3d4a0dc011ea40c9aac03d90c8d4fb3b is 50, key is test_row_0/C:col10/1734371920698/Put/seqid=0 2024-12-16T17:58:41,449 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41817 is added to blk_1073742336_1512 (size=12301) 2024-12-16T17:58:41,455 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39733 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f14d455ff3b60546f0a651dc8cf12d5c, server=3609ad07831c,39733,1734371789085 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-16T17:58:41,456 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39733 {}] ipc.CallRunner(138): callId: 106 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35480 deadline: 1734371981454, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f14d455ff3b60546f0a651dc8cf12d5c, server=3609ad07831c,39733,1734371789085 2024-12-16T17:58:41,456 DEBUG [Thread-2013 {}] client.RpcRetryingCallerImpl(129): Call exception, tries=6, retries=16, started=4143 ms ago, cancelled=false, msg=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f14d455ff3b60546f0a651dc8cf12d5c, server=3609ad07831c,39733,1734371789085 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) , details=row 'test_row_0' on table 'TestAcidGuarantees' at region=TestAcidGuarantees,,1734371907068.f14d455ff3b60546f0a651dc8cf12d5c., hostname=3609ad07831c,39733,1734371789085, seqNum=2, see https://s.apache.org/timeout, exception=org.apache.hadoop.hbase.RegionTooBusyException: org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f14d455ff3b60546f0a651dc8cf12d5c, server=3609ad07831c,39733,1734371789085 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at jdk.internal.reflect.GeneratedConstructorAccessor40.newInstance(Unknown Source) at 
java.base/jdk.internal.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45) at java.base/java.lang.reflect.Constructor.newInstanceWithCaller(Constructor.java:499) at java.base/java.lang.reflect.Constructor.newInstance(Constructor.java:480) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.instantiateException(RemoteWithExtrasException.java:110) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.unwrapRemoteException(RemoteWithExtrasException.java:100) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.makeIOExceptionOfException(ProtobufUtil.java:280) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.handleRemoteException(ProtobufUtil.java:265) at org.apache.hadoop.hbase.client.RegionServerCallable.call(RegionServerCallable.java:133) at org.apache.hadoop.hbase.client.RpcRetryingCallerImpl.callWithRetries(RpcRetryingCallerImpl.java:104) at org.apache.hadoop.hbase.client.HTable.lambda$put$3(HTable.java:578) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.client.HTable.put(HTable.java:565) at org.apache.hadoop.hbase.AcidGuaranteesTestTool$AtomicityWriter.doAnAction(AcidGuaranteesTestTool.java:169) at org.apache.hadoop.hbase.MultithreadedTestUtil$RepeatingTestThread.doWork(MultithreadedTestUtil.java:149) at org.apache.hadoop.hbase.MultithreadedTestUtil$TestThread.run(MultithreadedTestUtil.java:123) Caused by: org.apache.hadoop.hbase.ipc.RemoteWithExtrasException(org.apache.hadoop.hbase.RegionTooBusyException): org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f14d455ff3b60546f0a651dc8cf12d5c, server=3609ad07831c,39733,1734371789085 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.onCallFinished(AbstractRpcClient.java:392) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.access$100(AbstractRpcClient.java:94) at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:430) at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:425) at org.apache.hadoop.hbase.ipc.Call.callComplete(Call.java:116) at org.apache.hadoop.hbase.ipc.Call.setException(Call.java:131) at org.apache.hadoop.hbase.ipc.RpcConnection.readResponse(RpcConnection.java:457) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.readResponse(NettyRpcDuplexHandler.java:125) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.channelRead(NettyRpcDuplexHandler.java:140) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at 
org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.fireChannelRead(ByteToMessageDecoder.java:346) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.channelRead(ByteToMessageDecoder.java:318) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:444) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.timeout.IdleStateHandler.channelRead(IdleStateHandler.java:289) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline$HeadContext.channelRead(DefaultChannelPipeline.java:1357) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:440) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline.fireChannelRead(DefaultChannelPipeline.java:868) at org.apache.hbase.thirdparty.io.netty.channel.nio.AbstractNioByteChannel$NioByteUnsafe.read(AbstractNioByteChannel.java:166) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKey(NioEventLoop.java:788) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeysOptimized(NioEventLoop.java:724) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeys(NioEventLoop.java:650) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:562) at org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) at org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) at org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) at java.base/java.lang.Thread.run(Thread.java:840) 2024-12-16T17:58:41,581 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38367 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=130 2024-12-16T17:58:41,588 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 3609ad07831c,39733,1734371789085 2024-12-16T17:58:41,589 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=39733 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=131 2024-12-16T17:58:41,589 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-0 
{event_type=RS_FLUSH_REGIONS, pid=131}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1734371907068.f14d455ff3b60546f0a651dc8cf12d5c. 2024-12-16T17:58:41,589 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-0 {event_type=RS_FLUSH_REGIONS, pid=131}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1734371907068.f14d455ff3b60546f0a651dc8cf12d5c. as already flushing 2024-12-16T17:58:41,589 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-0 {event_type=RS_FLUSH_REGIONS, pid=131}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1734371907068.f14d455ff3b60546f0a651dc8cf12d5c. 2024-12-16T17:58:41,589 ERROR [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-0 {event_type=RS_FLUSH_REGIONS, pid=131}] handler.RSProcedureHandler(58): pid=131 java.io.IOException: Unable to complete flush {ENCODED => f14d455ff3b60546f0a651dc8cf12d5c, NAME => 'TestAcidGuarantees,,1734371907068.f14d455ff3b60546f0a651dc8cf12d5c.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-16T17:58:41,589 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-0 {event_type=RS_FLUSH_REGIONS, pid=131}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=131 java.io.IOException: Unable to complete flush {ENCODED => f14d455ff3b60546f0a651dc8cf12d5c, NAME => 'TestAcidGuarantees,,1734371907068.f14d455ff3b60546f0a651dc8cf12d5c.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-16T17:58:41,589 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38367 {}] master.HMaster(4114): Remote procedure failed, pid=131 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => f14d455ff3b60546f0a651dc8cf12d5c, NAME => 'TestAcidGuarantees,,1734371907068.f14d455ff3b60546f0a651dc8cf12d5c.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => f14d455ff3b60546f0a651dc8cf12d5c, NAME => 'TestAcidGuarantees,,1734371907068.f14d455ff3b60546f0a651dc8cf12d5c.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-16T17:58:41,642 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39733 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f14d455ff3b60546f0a651dc8cf12d5c, server=3609ad07831c,39733,1734371789085 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-16T17:58:41,642 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39733 {}] ipc.CallRunner(138): callId: 127 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35486 deadline: 1734371981639, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f14d455ff3b60546f0a651dc8cf12d5c, server=3609ad07831c,39733,1734371789085 2024-12-16T17:58:41,644 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39733 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f14d455ff3b60546f0a651dc8cf12d5c, server=3609ad07831c,39733,1734371789085 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-16T17:58:41,644 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39733 {}] ipc.CallRunner(138): callId: 131 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35488 deadline: 1734371981641, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f14d455ff3b60546f0a651dc8cf12d5c, server=3609ad07831c,39733,1734371789085 2024-12-16T17:58:41,647 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39733 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f14d455ff3b60546f0a651dc8cf12d5c, server=3609ad07831c,39733,1734371789085 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-16T17:58:41,647 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39733 {}] ipc.CallRunner(138): callId: 135 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35424 deadline: 1734371981643, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f14d455ff3b60546f0a651dc8cf12d5c, server=3609ad07831c,39733,1734371789085 2024-12-16T17:58:41,741 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 3609ad07831c,39733,1734371789085 2024-12-16T17:58:41,741 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=39733 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=131 2024-12-16T17:58:41,741 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-1 {event_type=RS_FLUSH_REGIONS, pid=131}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1734371907068.f14d455ff3b60546f0a651dc8cf12d5c. 2024-12-16T17:58:41,741 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-1 {event_type=RS_FLUSH_REGIONS, pid=131}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1734371907068.f14d455ff3b60546f0a651dc8cf12d5c. as already flushing 2024-12-16T17:58:41,741 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-1 {event_type=RS_FLUSH_REGIONS, pid=131}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1734371907068.f14d455ff3b60546f0a651dc8cf12d5c. 2024-12-16T17:58:41,741 ERROR [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-1 {event_type=RS_FLUSH_REGIONS, pid=131}] handler.RSProcedureHandler(58): pid=131 java.io.IOException: Unable to complete flush {ENCODED => f14d455ff3b60546f0a651dc8cf12d5c, NAME => 'TestAcidGuarantees,,1734371907068.f14d455ff3b60546f0a651dc8cf12d5c.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] 
at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-16T17:58:41,741 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-1 {event_type=RS_FLUSH_REGIONS, pid=131}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=131 java.io.IOException: Unable to complete flush {ENCODED => f14d455ff3b60546f0a651dc8cf12d5c, NAME => 'TestAcidGuarantees,,1734371907068.f14d455ff3b60546f0a651dc8cf12d5c.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-16T17:58:41,742 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38367 {}] master.HMaster(4114): Remote procedure failed, pid=131 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => f14d455ff3b60546f0a651dc8cf12d5c, NAME => 'TestAcidGuarantees,,1734371907068.f14d455ff3b60546f0a651dc8cf12d5c.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => f14d455ff3b60546f0a651dc8cf12d5c, NAME => 'TestAcidGuarantees,,1734371907068.f14d455ff3b60546f0a651dc8cf12d5c.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-16T17:58:41,830 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39733 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f14d455ff3b60546f0a651dc8cf12d5c, server=3609ad07831c,39733,1734371789085 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-16T17:58:41,830 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39733 {}] ipc.CallRunner(138): callId: 130 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35482 deadline: 1734371981828, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f14d455ff3b60546f0a651dc8cf12d5c, server=3609ad07831c,39733,1734371789085 2024-12-16T17:58:41,850 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=62.62 KB at sequenceid=291 (bloomFilter=true), to=hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/f14d455ff3b60546f0a651dc8cf12d5c/.tmp/C/3d4a0dc011ea40c9aac03d90c8d4fb3b 2024-12-16T17:58:41,858 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/f14d455ff3b60546f0a651dc8cf12d5c/.tmp/A/e5c4b657bcb940f29410114a3a9770ff as hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/f14d455ff3b60546f0a651dc8cf12d5c/A/e5c4b657bcb940f29410114a3a9770ff 2024-12-16T17:58:41,861 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added 
hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/f14d455ff3b60546f0a651dc8cf12d5c/A/e5c4b657bcb940f29410114a3a9770ff, entries=250, sequenceid=291, filesize=16.8 K 2024-12-16T17:58:41,861 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/f14d455ff3b60546f0a651dc8cf12d5c/.tmp/B/bc6feb47c7874f978b7613d59e4ac684 as hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/f14d455ff3b60546f0a651dc8cf12d5c/B/bc6feb47c7874f978b7613d59e4ac684 2024-12-16T17:58:41,864 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/f14d455ff3b60546f0a651dc8cf12d5c/B/bc6feb47c7874f978b7613d59e4ac684, entries=150, sequenceid=291, filesize=12.0 K 2024-12-16T17:58:41,865 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/f14d455ff3b60546f0a651dc8cf12d5c/.tmp/C/3d4a0dc011ea40c9aac03d90c8d4fb3b as hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/f14d455ff3b60546f0a651dc8cf12d5c/C/3d4a0dc011ea40c9aac03d90c8d4fb3b 2024-12-16T17:58:41,868 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/f14d455ff3b60546f0a651dc8cf12d5c/C/3d4a0dc011ea40c9aac03d90c8d4fb3b, entries=150, sequenceid=291, filesize=12.0 K 2024-12-16T17:58:41,868 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~187.85 KB/192360, heapSize ~492.89 KB/504720, currentSize=26.84 KB/27480 for f14d455ff3b60546f0a651dc8cf12d5c in 851ms, sequenceid=291, compaction requested=true 2024-12-16T17:58:41,868 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for f14d455ff3b60546f0a651dc8cf12d5c: 2024-12-16T17:58:41,869 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store f14d455ff3b60546f0a651dc8cf12d5c:A, priority=-2147483648, current under compaction store size is 1 2024-12-16T17:58:41,869 DEBUG [RS:0;3609ad07831c:39733-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-16T17:58:41,869 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-16T17:58:41,869 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store f14d455ff3b60546f0a651dc8cf12d5c:B, priority=-2147483648, current under compaction store size is 2 2024-12-16T17:58:41,869 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-16T17:58:41,869 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store f14d455ff3b60546f0a651dc8cf12d5c:C, priority=-2147483648, current under compaction store size is 3 2024-12-16T17:58:41,869 DEBUG 
[MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-16T17:58:41,869 DEBUG [RS:0;3609ad07831c:39733-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-16T17:58:41,870 DEBUG [RS:0;3609ad07831c:39733-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 42113 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-16T17:58:41,870 DEBUG [RS:0;3609ad07831c:39733-shortCompactions-0 {}] regionserver.HStore(1540): f14d455ff3b60546f0a651dc8cf12d5c/A is initiating minor compaction (all files) 2024-12-16T17:58:41,870 INFO [RS:0;3609ad07831c:39733-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of f14d455ff3b60546f0a651dc8cf12d5c/A in TestAcidGuarantees,,1734371907068.f14d455ff3b60546f0a651dc8cf12d5c. 2024-12-16T17:58:41,870 INFO [RS:0;3609ad07831c:39733-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/f14d455ff3b60546f0a651dc8cf12d5c/A/047ce9ef1c95414a8cdaeedddd577c9d, hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/f14d455ff3b60546f0a651dc8cf12d5c/A/1090afae0d8146f2928c666616ca2cf8, hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/f14d455ff3b60546f0a651dc8cf12d5c/A/e5c4b657bcb940f29410114a3a9770ff] into tmpdir=hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/f14d455ff3b60546f0a651dc8cf12d5c/.tmp, totalSize=41.1 K 2024-12-16T17:58:41,871 DEBUG [RS:0;3609ad07831c:39733-shortCompactions-0 {}] compactions.Compactor(224): Compacting 047ce9ef1c95414a8cdaeedddd577c9d, keycount=150, bloomtype=ROW, size=12.4 K, encoding=NONE, compression=NONE, seqNum=251, earliestPutTs=1734371919442 2024-12-16T17:58:41,871 DEBUG [RS:0;3609ad07831c:39733-shortCompactions-0 {}] compactions.Compactor(224): Compacting 1090afae0d8146f2928c666616ca2cf8, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=258, earliestPutTs=1734371919553 2024-12-16T17:58:41,871 DEBUG [RS:0;3609ad07831c:39733-shortCompactions-0 {}] compactions.Compactor(224): Compacting e5c4b657bcb940f29410114a3a9770ff, keycount=250, bloomtype=ROW, size=16.8 K, encoding=NONE, compression=NONE, seqNum=291, earliestPutTs=1734371920698 2024-12-16T17:58:41,873 DEBUG [RS:0;3609ad07831c:39733-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 37233 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-16T17:58:41,873 DEBUG [RS:0;3609ad07831c:39733-longCompactions-0 {}] regionserver.HStore(1540): f14d455ff3b60546f0a651dc8cf12d5c/B is initiating minor compaction (all files) 2024-12-16T17:58:41,873 INFO [RS:0;3609ad07831c:39733-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of f14d455ff3b60546f0a651dc8cf12d5c/B in TestAcidGuarantees,,1734371907068.f14d455ff3b60546f0a651dc8cf12d5c. 
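Editor's note: the compaction selections logged above ("Exploring compaction algorithm has selected 3 files of size 42113 ... with 1 in ratio") come from an exploring-style policy that only accepts a candidate set when no single file is disproportionately large relative to the rest. The following is a minimal Java sketch of that size-ratio test only; it is an illustration, not the HBase implementation, and the byte sizes are approximations of the three logged store files (~12.4 K, ~11.9 K, ~16.8 K) with the commonly cited default ratio of 1.2 (hbase.hstore.compaction.ratio).

// Simplified illustration of the size-ratio check used by exploring-style
// compaction policies. Not the HBase code path; sizes are approximations of
// the three store files selected in the log above (total ~41.1 K).
public class CompactionRatioSketch {
    /** True if every file is no larger than ratio * (sum of the other files). */
    static boolean filesInRatio(long[] fileSizes, double ratio) {
        long total = 0;
        for (long s : fileSizes) {
            total += s;
        }
        for (long s : fileSizes) {
            if (s > ratio * (total - s)) {
                return false;
            }
        }
        return true;
    }

    public static void main(String[] args) {
        long[] candidate = {12700L, 12200L, 17200L}; // approx. 12.4 K, 11.9 K, 16.8 K
        double ratio = 1.2; // assumed default for hbase.hstore.compaction.ratio
        System.out.println("Candidate passes ratio check: " + filesInRatio(candidate, ratio));
    }
}

With these approximate sizes the check passes, which matches the log: all three eligible files were accepted into one minor compaction.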
2024-12-16T17:58:41,873 INFO [RS:0;3609ad07831c:39733-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/f14d455ff3b60546f0a651dc8cf12d5c/B/37e903c14b124434975c24d40787af4f, hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/f14d455ff3b60546f0a651dc8cf12d5c/B/1d618a3004ab43ef9ca320235325eec4, hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/f14d455ff3b60546f0a651dc8cf12d5c/B/bc6feb47c7874f978b7613d59e4ac684] into tmpdir=hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/f14d455ff3b60546f0a651dc8cf12d5c/.tmp, totalSize=36.4 K 2024-12-16T17:58:41,873 DEBUG [RS:0;3609ad07831c:39733-longCompactions-0 {}] compactions.Compactor(224): Compacting 37e903c14b124434975c24d40787af4f, keycount=150, bloomtype=ROW, size=12.4 K, encoding=NONE, compression=NONE, seqNum=251, earliestPutTs=1734371919442 2024-12-16T17:58:41,874 DEBUG [RS:0;3609ad07831c:39733-longCompactions-0 {}] compactions.Compactor(224): Compacting 1d618a3004ab43ef9ca320235325eec4, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=258, earliestPutTs=1734371919553 2024-12-16T17:58:41,874 DEBUG [RS:0;3609ad07831c:39733-longCompactions-0 {}] compactions.Compactor(224): Compacting bc6feb47c7874f978b7613d59e4ac684, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=291, earliestPutTs=1734371920698 2024-12-16T17:58:41,879 INFO [RS:0;3609ad07831c:39733-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): f14d455ff3b60546f0a651dc8cf12d5c#A#compaction#438 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-12-16T17:58:41,879 DEBUG [RS:0;3609ad07831c:39733-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/f14d455ff3b60546f0a651dc8cf12d5c/.tmp/A/a8593e9aefd04061b4d17fc7b97cdc0f is 50, key is test_row_0/A:col10/1734371920698/Put/seqid=0 2024-12-16T17:58:41,881 INFO [RS:0;3609ad07831c:39733-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): f14d455ff3b60546f0a651dc8cf12d5c#B#compaction#439 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-12-16T17:58:41,882 DEBUG [RS:0;3609ad07831c:39733-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/f14d455ff3b60546f0a651dc8cf12d5c/.tmp/B/87bda6a8c10b44bebe17b914aa84c49f is 50, key is test_row_0/B:col10/1734371920698/Put/seqid=0 2024-12-16T17:58:41,890 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41817 is added to blk_1073742338_1514 (size=12983) 2024-12-16T17:58:41,890 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41817 is added to blk_1073742337_1513 (size=12983) 2024-12-16T17:58:41,893 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 3609ad07831c,39733,1734371789085 2024-12-16T17:58:41,893 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=39733 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=131 2024-12-16T17:58:41,893 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-2 {event_type=RS_FLUSH_REGIONS, pid=131}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1734371907068.f14d455ff3b60546f0a651dc8cf12d5c. 2024-12-16T17:58:41,894 INFO [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-2 {event_type=RS_FLUSH_REGIONS, pid=131}] regionserver.HRegion(2837): Flushing f14d455ff3b60546f0a651dc8cf12d5c 3/3 column families, dataSize=26.84 KB heapSize=71.06 KB 2024-12-16T17:58:41,894 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-2 {event_type=RS_FLUSH_REGIONS, pid=131}] regionserver.CompactingMemStore(205): FLUSHING TO DISK f14d455ff3b60546f0a651dc8cf12d5c, store=A 2024-12-16T17:58:41,894 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-2 {event_type=RS_FLUSH_REGIONS, pid=131}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-16T17:58:41,894 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-2 {event_type=RS_FLUSH_REGIONS, pid=131}] regionserver.CompactingMemStore(205): FLUSHING TO DISK f14d455ff3b60546f0a651dc8cf12d5c, store=B 2024-12-16T17:58:41,894 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-2 {event_type=RS_FLUSH_REGIONS, pid=131}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-16T17:58:41,894 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-2 {event_type=RS_FLUSH_REGIONS, pid=131}] regionserver.CompactingMemStore(205): FLUSHING TO DISK f14d455ff3b60546f0a651dc8cf12d5c, store=C 2024-12-16T17:58:41,894 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-2 {event_type=RS_FLUSH_REGIONS, pid=131}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-16T17:58:41,898 DEBUG [RS:0;3609ad07831c:39733-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/f14d455ff3b60546f0a651dc8cf12d5c/.tmp/A/a8593e9aefd04061b4d17fc7b97cdc0f as hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/f14d455ff3b60546f0a651dc8cf12d5c/A/a8593e9aefd04061b4d17fc7b97cdc0f 2024-12-16T17:58:41,901 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-2 
{event_type=RS_FLUSH_REGIONS, pid=131}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/f14d455ff3b60546f0a651dc8cf12d5c/.tmp/A/66b4830aac3f491993c3f3568d8766a4 is 50, key is test_row_0/A:col10/1734371921018/Put/seqid=0 2024-12-16T17:58:41,902 INFO [RS:0;3609ad07831c:39733-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in f14d455ff3b60546f0a651dc8cf12d5c/A of f14d455ff3b60546f0a651dc8cf12d5c into a8593e9aefd04061b4d17fc7b97cdc0f(size=12.7 K), total size for store is 12.7 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-12-16T17:58:41,902 DEBUG [RS:0;3609ad07831c:39733-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for f14d455ff3b60546f0a651dc8cf12d5c: 2024-12-16T17:58:41,902 INFO [RS:0;3609ad07831c:39733-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1734371907068.f14d455ff3b60546f0a651dc8cf12d5c., storeName=f14d455ff3b60546f0a651dc8cf12d5c/A, priority=13, startTime=1734371921869; duration=0sec 2024-12-16T17:58:41,902 DEBUG [RS:0;3609ad07831c:39733-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-16T17:58:41,902 DEBUG [RS:0;3609ad07831c:39733-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: f14d455ff3b60546f0a651dc8cf12d5c:A 2024-12-16T17:58:41,903 DEBUG [RS:0;3609ad07831c:39733-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-16T17:58:41,904 DEBUG [RS:0;3609ad07831c:39733-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 37233 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-16T17:58:41,904 DEBUG [RS:0;3609ad07831c:39733-shortCompactions-0 {}] regionserver.HStore(1540): f14d455ff3b60546f0a651dc8cf12d5c/C is initiating minor compaction (all files) 2024-12-16T17:58:41,904 INFO [RS:0;3609ad07831c:39733-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of f14d455ff3b60546f0a651dc8cf12d5c/C in TestAcidGuarantees,,1734371907068.f14d455ff3b60546f0a651dc8cf12d5c. 
2024-12-16T17:58:41,904 INFO [RS:0;3609ad07831c:39733-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/f14d455ff3b60546f0a651dc8cf12d5c/C/c169b94adbf34c72b5f51bebc0c0c450, hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/f14d455ff3b60546f0a651dc8cf12d5c/C/3933beb632474f059457e6c6943c0c52, hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/f14d455ff3b60546f0a651dc8cf12d5c/C/3d4a0dc011ea40c9aac03d90c8d4fb3b] into tmpdir=hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/f14d455ff3b60546f0a651dc8cf12d5c/.tmp, totalSize=36.4 K 2024-12-16T17:58:41,904 DEBUG [RS:0;3609ad07831c:39733-shortCompactions-0 {}] compactions.Compactor(224): Compacting c169b94adbf34c72b5f51bebc0c0c450, keycount=150, bloomtype=ROW, size=12.4 K, encoding=NONE, compression=NONE, seqNum=251, earliestPutTs=1734371919442 2024-12-16T17:58:41,904 DEBUG [RS:0;3609ad07831c:39733-shortCompactions-0 {}] compactions.Compactor(224): Compacting 3933beb632474f059457e6c6943c0c52, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=258, earliestPutTs=1734371919553 2024-12-16T17:58:41,904 DEBUG [RS:0;3609ad07831c:39733-shortCompactions-0 {}] compactions.Compactor(224): Compacting 3d4a0dc011ea40c9aac03d90c8d4fb3b, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=291, earliestPutTs=1734371920698 2024-12-16T17:58:41,912 INFO [RS:0;3609ad07831c:39733-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): f14d455ff3b60546f0a651dc8cf12d5c#C#compaction#441 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-12-16T17:58:41,912 DEBUG [RS:0;3609ad07831c:39733-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/f14d455ff3b60546f0a651dc8cf12d5c/.tmp/C/e944ccb291a248f695c61f3b29f87a96 is 50, key is test_row_0/C:col10/1734371920698/Put/seqid=0 2024-12-16T17:58:41,917 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41817 is added to blk_1073742339_1515 (size=12301) 2024-12-16T17:58:41,918 INFO [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-2 {event_type=RS_FLUSH_REGIONS, pid=131}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=8.95 KB at sequenceid=299 (bloomFilter=true), to=hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/f14d455ff3b60546f0a651dc8cf12d5c/.tmp/A/66b4830aac3f491993c3f3568d8766a4 2024-12-16T17:58:41,929 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41817 is added to blk_1073742340_1516 (size=12983) 2024-12-16T17:58:41,930 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-2 {event_type=RS_FLUSH_REGIONS, pid=131}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/f14d455ff3b60546f0a651dc8cf12d5c/.tmp/B/343a9b076d724e87bcba2be581ed02be is 50, key is test_row_0/B:col10/1734371921018/Put/seqid=0 2024-12-16T17:58:41,934 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41817 is added to blk_1073742341_1517 (size=12301) 2024-12-16T17:58:42,081 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38367 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=130 2024-12-16T17:58:42,151 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39733 {}] regionserver.HRegion(8581): Flush requested on f14d455ff3b60546f0a651dc8cf12d5c 2024-12-16T17:58:42,151 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1734371907068.f14d455ff3b60546f0a651dc8cf12d5c. as already flushing 2024-12-16T17:58:42,198 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39733 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f14d455ff3b60546f0a651dc8cf12d5c, server=3609ad07831c,39733,1734371789085 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-16T17:58:42,198 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39733 {}] ipc.CallRunner(138): callId: 139 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35486 deadline: 1734371982194, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f14d455ff3b60546f0a651dc8cf12d5c, server=3609ad07831c,39733,1734371789085 2024-12-16T17:58:42,199 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39733 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f14d455ff3b60546f0a651dc8cf12d5c, server=3609ad07831c,39733,1734371789085 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-16T17:58:42,199 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39733 {}] ipc.CallRunner(138): callId: 144 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35424 deadline: 1734371982194, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f14d455ff3b60546f0a651dc8cf12d5c, server=3609ad07831c,39733,1734371789085 2024-12-16T17:58:42,199 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39733 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f14d455ff3b60546f0a651dc8cf12d5c, server=3609ad07831c,39733,1734371789085 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-16T17:58:42,199 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39733 {}] ipc.CallRunner(138): callId: 142 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35488 deadline: 1734371982195, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f14d455ff3b60546f0a651dc8cf12d5c, server=3609ad07831c,39733,1734371789085 2024-12-16T17:58:42,293 DEBUG [RS:0;3609ad07831c:39733-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/f14d455ff3b60546f0a651dc8cf12d5c/.tmp/B/87bda6a8c10b44bebe17b914aa84c49f as hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/f14d455ff3b60546f0a651dc8cf12d5c/B/87bda6a8c10b44bebe17b914aa84c49f 2024-12-16T17:58:42,296 INFO [RS:0;3609ad07831c:39733-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in f14d455ff3b60546f0a651dc8cf12d5c/B of f14d455ff3b60546f0a651dc8cf12d5c into 87bda6a8c10b44bebe17b914aa84c49f(size=12.7 K), total size for store is 12.7 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-12-16T17:58:42,296 DEBUG [RS:0;3609ad07831c:39733-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for f14d455ff3b60546f0a651dc8cf12d5c: 2024-12-16T17:58:42,296 INFO [RS:0;3609ad07831c:39733-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1734371907068.f14d455ff3b60546f0a651dc8cf12d5c., storeName=f14d455ff3b60546f0a651dc8cf12d5c/B, priority=13, startTime=1734371921869; duration=0sec 2024-12-16T17:58:42,297 DEBUG [RS:0;3609ad07831c:39733-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-16T17:58:42,297 DEBUG [RS:0;3609ad07831c:39733-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: f14d455ff3b60546f0a651dc8cf12d5c:B 2024-12-16T17:58:42,303 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39733 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f14d455ff3b60546f0a651dc8cf12d5c, server=3609ad07831c,39733,1734371789085 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-16T17:58:42,303 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39733 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f14d455ff3b60546f0a651dc8cf12d5c, server=3609ad07831c,39733,1734371789085 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-16T17:58:42,303 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39733 {}] ipc.CallRunner(138): callId: 141 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35486 deadline: 1734371982299, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f14d455ff3b60546f0a651dc8cf12d5c, server=3609ad07831c,39733,1734371789085 2024-12-16T17:58:42,303 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39733 {}] ipc.CallRunner(138): callId: 146 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35424 deadline: 1734371982299, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f14d455ff3b60546f0a651dc8cf12d5c, server=3609ad07831c,39733,1734371789085 2024-12-16T17:58:42,303 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39733 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f14d455ff3b60546f0a651dc8cf12d5c, server=3609ad07831c,39733,1734371789085 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-16T17:58:42,303 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39733 {}] ipc.CallRunner(138): callId: 144 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35488 deadline: 1734371982299, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f14d455ff3b60546f0a651dc8cf12d5c, server=3609ad07831c,39733,1734371789085 2024-12-16T17:58:42,333 DEBUG [RS:0;3609ad07831c:39733-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/f14d455ff3b60546f0a651dc8cf12d5c/.tmp/C/e944ccb291a248f695c61f3b29f87a96 as hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/f14d455ff3b60546f0a651dc8cf12d5c/C/e944ccb291a248f695c61f3b29f87a96 2024-12-16T17:58:42,334 INFO [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-2 {event_type=RS_FLUSH_REGIONS, pid=131}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=8.95 KB at sequenceid=299 (bloomFilter=true), to=hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/f14d455ff3b60546f0a651dc8cf12d5c/.tmp/B/343a9b076d724e87bcba2be581ed02be 2024-12-16T17:58:42,337 INFO [RS:0;3609ad07831c:39733-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in f14d455ff3b60546f0a651dc8cf12d5c/C of f14d455ff3b60546f0a651dc8cf12d5c into e944ccb291a248f695c61f3b29f87a96(size=12.7 K), total size for store is 12.7 K. This selection was in queue for 0sec, and took 0sec to execute. 
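Editor's note: the repeated RegionTooBusyException warnings ("Over memstore limit=512.0 K") mean the region server is rejecting writes while the region's memstore is above its blocking size, which is derived from the configured memstore flush size and hbase.hregion.memstore.block.multiplier; this test appears to run with a deliberately small flush size, so the limit is hit while flushes and compactions catch up. Below is a hedged client-side sketch of backing off and retrying a put when that exception reaches the caller. It assumes client retries are tuned low enough for RegionTooBusyException to surface (by default the HBase client retries it internally), and the row, family, qualifier, and value are placeholders, not taken from the test.

import java.io.IOException;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.RegionTooBusyException;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.util.Bytes;

/**
 * Sketch: back off and retry a put when a region reports
 * RegionTooBusyException ("Over memstore limit"), as logged above.
 * Placeholder row/column values; not the test's write path.
 */
public class BackoffPutExample {
    public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();
        try (Connection conn = ConnectionFactory.createConnection(conf);
             Table table = conn.getTable(TableName.valueOf("TestAcidGuarantees"))) {
            Put put = new Put(Bytes.toBytes("test_row_0"));
            put.addColumn(Bytes.toBytes("A"), Bytes.toBytes("col10"), Bytes.toBytes("value"));
            putWithBackoff(table, put, 5);
        }
    }

    static void putWithBackoff(Table table, Put put, int maxAttempts)
            throws IOException, InterruptedException {
        long sleepMs = 100;
        for (int attempt = 1; attempt <= maxAttempts; attempt++) {
            try {
                table.put(put);
                return;
            } catch (RegionTooBusyException e) {
                // The region's memstore is above its blocking limit; give the
                // flush a chance to catch up before retrying.
                if (attempt == maxAttempts) {
                    throw e;
                }
                Thread.sleep(sleepMs);
                sleepMs *= 2;
            }
        }
    }
}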
2024-12-16T17:58:42,337 DEBUG [RS:0;3609ad07831c:39733-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for f14d455ff3b60546f0a651dc8cf12d5c: 2024-12-16T17:58:42,337 INFO [RS:0;3609ad07831c:39733-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1734371907068.f14d455ff3b60546f0a651dc8cf12d5c., storeName=f14d455ff3b60546f0a651dc8cf12d5c/C, priority=13, startTime=1734371921869; duration=0sec 2024-12-16T17:58:42,337 DEBUG [RS:0;3609ad07831c:39733-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-16T17:58:42,337 DEBUG [RS:0;3609ad07831c:39733-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: f14d455ff3b60546f0a651dc8cf12d5c:C 2024-12-16T17:58:42,338 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-2 {event_type=RS_FLUSH_REGIONS, pid=131}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/f14d455ff3b60546f0a651dc8cf12d5c/.tmp/C/f8e1dbb4f81644d49a8a3d3d16f214b9 is 50, key is test_row_0/C:col10/1734371921018/Put/seqid=0 2024-12-16T17:58:42,343 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41817 is added to blk_1073742342_1518 (size=12301) 2024-12-16T17:58:42,508 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39733 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f14d455ff3b60546f0a651dc8cf12d5c, server=3609ad07831c,39733,1734371789085 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-16T17:58:42,509 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39733 {}] ipc.CallRunner(138): callId: 148 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35424 deadline: 1734371982504, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f14d455ff3b60546f0a651dc8cf12d5c, server=3609ad07831c,39733,1734371789085 2024-12-16T17:58:42,509 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39733 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f14d455ff3b60546f0a651dc8cf12d5c, server=3609ad07831c,39733,1734371789085 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-16T17:58:42,509 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39733 {}] ipc.CallRunner(138): callId: 146 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35488 deadline: 1734371982504, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f14d455ff3b60546f0a651dc8cf12d5c, server=3609ad07831c,39733,1734371789085 2024-12-16T17:58:42,509 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39733 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f14d455ff3b60546f0a651dc8cf12d5c, server=3609ad07831c,39733,1734371789085 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-16T17:58:42,509 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39733 {}] ipc.CallRunner(138): callId: 143 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35486 deadline: 1734371982505, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f14d455ff3b60546f0a651dc8cf12d5c, server=3609ad07831c,39733,1734371789085 2024-12-16T17:58:42,744 INFO [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-2 {event_type=RS_FLUSH_REGIONS, pid=131}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=8.95 KB at sequenceid=299 (bloomFilter=true), to=hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/f14d455ff3b60546f0a651dc8cf12d5c/.tmp/C/f8e1dbb4f81644d49a8a3d3d16f214b9 2024-12-16T17:58:42,748 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-2 {event_type=RS_FLUSH_REGIONS, pid=131}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/f14d455ff3b60546f0a651dc8cf12d5c/.tmp/A/66b4830aac3f491993c3f3568d8766a4 as hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/f14d455ff3b60546f0a651dc8cf12d5c/A/66b4830aac3f491993c3f3568d8766a4 2024-12-16T17:58:42,753 INFO [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-2 {event_type=RS_FLUSH_REGIONS, pid=131}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/f14d455ff3b60546f0a651dc8cf12d5c/A/66b4830aac3f491993c3f3568d8766a4, entries=150, sequenceid=299, filesize=12.0 K 2024-12-16T17:58:42,754 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-2 {event_type=RS_FLUSH_REGIONS, pid=131}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/f14d455ff3b60546f0a651dc8cf12d5c/.tmp/B/343a9b076d724e87bcba2be581ed02be as hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/f14d455ff3b60546f0a651dc8cf12d5c/B/343a9b076d724e87bcba2be581ed02be 2024-12-16T17:58:42,757 INFO [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-2 {event_type=RS_FLUSH_REGIONS, pid=131}] regionserver.HStore$StoreFlusherImpl(1989): Added 
hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/f14d455ff3b60546f0a651dc8cf12d5c/B/343a9b076d724e87bcba2be581ed02be, entries=150, sequenceid=299, filesize=12.0 K 2024-12-16T17:58:42,758 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-2 {event_type=RS_FLUSH_REGIONS, pid=131}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/f14d455ff3b60546f0a651dc8cf12d5c/.tmp/C/f8e1dbb4f81644d49a8a3d3d16f214b9 as hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/f14d455ff3b60546f0a651dc8cf12d5c/C/f8e1dbb4f81644d49a8a3d3d16f214b9 2024-12-16T17:58:42,761 INFO [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-2 {event_type=RS_FLUSH_REGIONS, pid=131}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/f14d455ff3b60546f0a651dc8cf12d5c/C/f8e1dbb4f81644d49a8a3d3d16f214b9, entries=150, sequenceid=299, filesize=12.0 K 2024-12-16T17:58:42,761 INFO [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-2 {event_type=RS_FLUSH_REGIONS, pid=131}] regionserver.HRegion(3040): Finished flush of dataSize ~26.84 KB/27480, heapSize ~71.02 KB/72720, currentSize=174.43 KB/178620 for f14d455ff3b60546f0a651dc8cf12d5c in 867ms, sequenceid=299, compaction requested=false 2024-12-16T17:58:42,761 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-2 {event_type=RS_FLUSH_REGIONS, pid=131}] regionserver.HRegion(2538): Flush status journal for f14d455ff3b60546f0a651dc8cf12d5c: 2024-12-16T17:58:42,761 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-2 {event_type=RS_FLUSH_REGIONS, pid=131}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1734371907068.f14d455ff3b60546f0a651dc8cf12d5c. 
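Editor's note: right after this flush finishes, the log shows FlushRegionProcedure pid=131 and FlushTableProcedure pid=130 completing on the master, followed by the jenkins client requesting another flush of TestAcidGuarantees (pid=132). The sketch below shows how such an administrative flush is typically requested through the HBase Admin API; the connection configuration is a placeholder and is not taken from the test harness.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

/**
 * Sketch: requesting a table flush through the Admin API. A call like this is
 * what appears in the master log as "Client=... flush TestAcidGuarantees",
 * which the master turns into a FlushTableProcedure with one
 * FlushRegionProcedure subprocedure per region.
 */
public class FlushTableExample {
    public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();
        try (Connection conn = ConnectionFactory.createConnection(conf);
             Admin admin = conn.getAdmin()) {
            // Waits for the master-side procedure to finish, matching the
            // "Operation: FLUSH ... procId: 130 completed" line above.
            admin.flush(TableName.valueOf("TestAcidGuarantees"));
        }
    }
}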
2024-12-16T17:58:42,761 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-2 {event_type=RS_FLUSH_REGIONS, pid=131}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=131 2024-12-16T17:58:42,762 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38367 {}] master.HMaster(4106): Remote procedure done, pid=131 2024-12-16T17:58:42,763 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=131, resume processing ppid=130 2024-12-16T17:58:42,763 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=131, ppid=130, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 1.7820 sec 2024-12-16T17:58:42,764 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=130, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=130, table=TestAcidGuarantees in 1.7850 sec 2024-12-16T17:58:42,814 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39733 {}] regionserver.HRegion(8581): Flush requested on f14d455ff3b60546f0a651dc8cf12d5c 2024-12-16T17:58:42,814 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing f14d455ff3b60546f0a651dc8cf12d5c 3/3 column families, dataSize=181.14 KB heapSize=475.36 KB 2024-12-16T17:58:42,814 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK f14d455ff3b60546f0a651dc8cf12d5c, store=A 2024-12-16T17:58:42,814 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-16T17:58:42,814 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK f14d455ff3b60546f0a651dc8cf12d5c, store=B 2024-12-16T17:58:42,814 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-16T17:58:42,814 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK f14d455ff3b60546f0a651dc8cf12d5c, store=C 2024-12-16T17:58:42,814 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-16T17:58:42,817 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/f14d455ff3b60546f0a651dc8cf12d5c/.tmp/A/1270b36cf6b9446ba16918ef6a9f0be5 is 50, key is test_row_0/A:col10/1734371922194/Put/seqid=0 2024-12-16T17:58:42,820 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41817 is added to blk_1073742343_1519 (size=14741) 2024-12-16T17:58:42,820 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39733 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f14d455ff3b60546f0a651dc8cf12d5c, server=3609ad07831c,39733,1734371789085 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-16T17:58:42,820 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39733 {}] ipc.CallRunner(138): callId: 151 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35424 deadline: 1734371982815, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f14d455ff3b60546f0a651dc8cf12d5c, server=3609ad07831c,39733,1734371789085 2024-12-16T17:58:42,820 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39733 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f14d455ff3b60546f0a651dc8cf12d5c, server=3609ad07831c,39733,1734371789085 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-16T17:58:42,821 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39733 {}] ipc.CallRunner(138): callId: 146 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35486 deadline: 1734371982815, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f14d455ff3b60546f0a651dc8cf12d5c, server=3609ad07831c,39733,1734371789085 2024-12-16T17:58:42,822 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39733 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f14d455ff3b60546f0a651dc8cf12d5c, server=3609ad07831c,39733,1734371789085 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-16T17:58:42,822 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39733 {}] ipc.CallRunner(138): callId: 150 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35488 deadline: 1734371982818, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f14d455ff3b60546f0a651dc8cf12d5c, server=3609ad07831c,39733,1734371789085 2024-12-16T17:58:42,838 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39733 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f14d455ff3b60546f0a651dc8cf12d5c, server=3609ad07831c,39733,1734371789085 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-16T17:58:42,838 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39733 {}] ipc.CallRunner(138): callId: 132 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35482 deadline: 1734371982835, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f14d455ff3b60546f0a651dc8cf12d5c, server=3609ad07831c,39733,1734371789085 2024-12-16T17:58:42,924 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39733 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f14d455ff3b60546f0a651dc8cf12d5c, server=3609ad07831c,39733,1734371789085 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-16T17:58:42,924 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39733 {}] ipc.CallRunner(138): callId: 153 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35424 deadline: 1734371982921, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f14d455ff3b60546f0a651dc8cf12d5c, server=3609ad07831c,39733,1734371789085 2024-12-16T17:58:42,924 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39733 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f14d455ff3b60546f0a651dc8cf12d5c, server=3609ad07831c,39733,1734371789085 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-16T17:58:42,924 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39733 {}] ipc.CallRunner(138): callId: 148 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35486 deadline: 1734371982923, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f14d455ff3b60546f0a651dc8cf12d5c, server=3609ad07831c,39733,1734371789085 2024-12-16T17:58:42,925 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39733 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f14d455ff3b60546f0a651dc8cf12d5c, server=3609ad07831c,39733,1734371789085 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-16T17:58:42,925 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39733 {}] ipc.CallRunner(138): callId: 152 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35488 deadline: 1734371982923, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f14d455ff3b60546f0a651dc8cf12d5c, server=3609ad07831c,39733,1734371789085 2024-12-16T17:58:43,082 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38367 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=130 2024-12-16T17:58:43,082 INFO [Thread-2023 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 130 completed 2024-12-16T17:58:43,084 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38367 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-12-16T17:58:43,084 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38367 {}] procedure2.ProcedureExecutor(1098): Stored pid=132, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=132, table=TestAcidGuarantees 2024-12-16T17:58:43,085 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38367 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=132 2024-12-16T17:58:43,085 INFO [PEWorker-2 {}] procedure.FlushTableProcedure(91): pid=132, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=132, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-12-16T17:58:43,085 INFO [PEWorker-2 {}] procedure.FlushTableProcedure(91): pid=132, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=132, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-12-16T17:58:43,085 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=133, ppid=132, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-12-16T17:58:43,129 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39733 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f14d455ff3b60546f0a651dc8cf12d5c, server=3609ad07831c,39733,1734371789085 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-16T17:58:43,130 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39733 {}] ipc.CallRunner(138): callId: 155 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35424 deadline: 1734371983125, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f14d455ff3b60546f0a651dc8cf12d5c, server=3609ad07831c,39733,1734371789085 2024-12-16T17:58:43,130 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39733 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f14d455ff3b60546f0a651dc8cf12d5c, server=3609ad07831c,39733,1734371789085 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-16T17:58:43,130 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39733 {}] ipc.CallRunner(138): callId: 150 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35486 deadline: 1734371983126, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f14d455ff3b60546f0a651dc8cf12d5c, server=3609ad07831c,39733,1734371789085 2024-12-16T17:58:43,130 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39733 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f14d455ff3b60546f0a651dc8cf12d5c, server=3609ad07831c,39733,1734371789085 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-16T17:58:43,130 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39733 {}] ipc.CallRunner(138): callId: 154 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35488 deadline: 1734371983126, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f14d455ff3b60546f0a651dc8cf12d5c, server=3609ad07831c,39733,1734371789085 2024-12-16T17:58:43,185 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38367 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=132 2024-12-16T17:58:43,220 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=60.38 KB at sequenceid=332 (bloomFilter=true), to=hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/f14d455ff3b60546f0a651dc8cf12d5c/.tmp/A/1270b36cf6b9446ba16918ef6a9f0be5 2024-12-16T17:58:43,226 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/f14d455ff3b60546f0a651dc8cf12d5c/.tmp/B/89993d51fc5d4936864381238aab5e3c is 50, key is test_row_0/B:col10/1734371922194/Put/seqid=0 2024-12-16T17:58:43,229 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41817 is added to blk_1073742344_1520 (size=12301) 2024-12-16T17:58:43,236 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 3609ad07831c,39733,1734371789085 2024-12-16T17:58:43,237 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=39733 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=133 2024-12-16T17:58:43,237 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-0 {event_type=RS_FLUSH_REGIONS, pid=133}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1734371907068.f14d455ff3b60546f0a651dc8cf12d5c. 2024-12-16T17:58:43,237 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-0 {event_type=RS_FLUSH_REGIONS, pid=133}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1734371907068.f14d455ff3b60546f0a651dc8cf12d5c. 
as already flushing 2024-12-16T17:58:43,237 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-0 {event_type=RS_FLUSH_REGIONS, pid=133}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1734371907068.f14d455ff3b60546f0a651dc8cf12d5c. 2024-12-16T17:58:43,237 ERROR [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-0 {event_type=RS_FLUSH_REGIONS, pid=133}] handler.RSProcedureHandler(58): pid=133 java.io.IOException: Unable to complete flush {ENCODED => f14d455ff3b60546f0a651dc8cf12d5c, NAME => 'TestAcidGuarantees,,1734371907068.f14d455ff3b60546f0a651dc8cf12d5c.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-16T17:58:43,237 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-0 {event_type=RS_FLUSH_REGIONS, pid=133}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=133 java.io.IOException: Unable to complete flush {ENCODED => f14d455ff3b60546f0a651dc8cf12d5c, NAME => 'TestAcidGuarantees,,1734371907068.f14d455ff3b60546f0a651dc8cf12d5c.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-16T17:58:43,238 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38367 {}] master.HMaster(4114): Remote procedure failed, pid=133 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => f14d455ff3b60546f0a651dc8cf12d5c, NAME => 'TestAcidGuarantees,,1734371907068.f14d455ff3b60546f0a651dc8cf12d5c.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] 
at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => f14d455ff3b60546f0a651dc8cf12d5c, NAME => 'TestAcidGuarantees,,1734371907068.f14d455ff3b60546f0a651dc8cf12d5c.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-16T17:58:43,386 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38367 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=132 2024-12-16T17:58:43,389 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 3609ad07831c,39733,1734371789085 2024-12-16T17:58:43,389 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=39733 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=133 2024-12-16T17:58:43,389 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-1 {event_type=RS_FLUSH_REGIONS, pid=133}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1734371907068.f14d455ff3b60546f0a651dc8cf12d5c. 2024-12-16T17:58:43,389 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-1 {event_type=RS_FLUSH_REGIONS, pid=133}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1734371907068.f14d455ff3b60546f0a651dc8cf12d5c. as already flushing 2024-12-16T17:58:43,390 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-1 {event_type=RS_FLUSH_REGIONS, pid=133}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1734371907068.f14d455ff3b60546f0a651dc8cf12d5c. 2024-12-16T17:58:43,390 ERROR [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-1 {event_type=RS_FLUSH_REGIONS, pid=133}] handler.RSProcedureHandler(58): pid=133 java.io.IOException: Unable to complete flush {ENCODED => f14d455ff3b60546f0a651dc8cf12d5c, NAME => 'TestAcidGuarantees,,1734371907068.f14d455ff3b60546f0a651dc8cf12d5c.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-16T17:58:43,390 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-1 {event_type=RS_FLUSH_REGIONS, pid=133}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=133 java.io.IOException: Unable to complete flush {ENCODED => f14d455ff3b60546f0a651dc8cf12d5c, NAME => 'TestAcidGuarantees,,1734371907068.f14d455ff3b60546f0a651dc8cf12d5c.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-16T17:58:43,390 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38367 {}] master.HMaster(4114): Remote procedure failed, pid=133 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => f14d455ff3b60546f0a651dc8cf12d5c, NAME => 'TestAcidGuarantees,,1734371907068.f14d455ff3b60546f0a651dc8cf12d5c.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => f14d455ff3b60546f0a651dc8cf12d5c, NAME => 'TestAcidGuarantees,,1734371907068.f14d455ff3b60546f0a651dc8cf12d5c.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-16T17:58:43,434 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39733 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f14d455ff3b60546f0a651dc8cf12d5c, server=3609ad07831c,39733,1734371789085 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-16T17:58:43,434 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39733 {}] ipc.CallRunner(138): callId: 157 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35424 deadline: 1734371983430, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f14d455ff3b60546f0a651dc8cf12d5c, server=3609ad07831c,39733,1734371789085 2024-12-16T17:58:43,435 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39733 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f14d455ff3b60546f0a651dc8cf12d5c, server=3609ad07831c,39733,1734371789085 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-16T17:58:43,435 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39733 {}] ipc.CallRunner(138): callId: 152 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35486 deadline: 1734371983432, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f14d455ff3b60546f0a651dc8cf12d5c, server=3609ad07831c,39733,1734371789085 2024-12-16T17:58:43,435 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39733 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f14d455ff3b60546f0a651dc8cf12d5c, server=3609ad07831c,39733,1734371789085 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-16T17:58:43,436 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39733 {}] ipc.CallRunner(138): callId: 156 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35488 deadline: 1734371983433, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f14d455ff3b60546f0a651dc8cf12d5c, server=3609ad07831c,39733,1734371789085 2024-12-16T17:58:43,541 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 3609ad07831c,39733,1734371789085 2024-12-16T17:58:43,541 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=39733 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=133 2024-12-16T17:58:43,542 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-2 {event_type=RS_FLUSH_REGIONS, pid=133}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1734371907068.f14d455ff3b60546f0a651dc8cf12d5c. 2024-12-16T17:58:43,542 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-2 {event_type=RS_FLUSH_REGIONS, pid=133}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1734371907068.f14d455ff3b60546f0a651dc8cf12d5c. as already flushing 2024-12-16T17:58:43,542 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-2 {event_type=RS_FLUSH_REGIONS, pid=133}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1734371907068.f14d455ff3b60546f0a651dc8cf12d5c. 2024-12-16T17:58:43,542 ERROR [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-2 {event_type=RS_FLUSH_REGIONS, pid=133}] handler.RSProcedureHandler(58): pid=133 java.io.IOException: Unable to complete flush {ENCODED => f14d455ff3b60546f0a651dc8cf12d5c, NAME => 'TestAcidGuarantees,,1734371907068.f14d455ff3b60546f0a651dc8cf12d5c.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] 
at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-16T17:58:43,542 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-2 {event_type=RS_FLUSH_REGIONS, pid=133}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=133 java.io.IOException: Unable to complete flush {ENCODED => f14d455ff3b60546f0a651dc8cf12d5c, NAME => 'TestAcidGuarantees,,1734371907068.f14d455ff3b60546f0a651dc8cf12d5c.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-16T17:58:43,542 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38367 {}] master.HMaster(4114): Remote procedure failed, pid=133 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => f14d455ff3b60546f0a651dc8cf12d5c, NAME => 'TestAcidGuarantees,,1734371907068.f14d455ff3b60546f0a651dc8cf12d5c.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => f14d455ff3b60546f0a651dc8cf12d5c, NAME => 'TestAcidGuarantees,,1734371907068.f14d455ff3b60546f0a651dc8cf12d5c.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-16T17:58:43,630 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=60.38 KB at sequenceid=332 (bloomFilter=true), to=hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/f14d455ff3b60546f0a651dc8cf12d5c/.tmp/B/89993d51fc5d4936864381238aab5e3c 2024-12-16T17:58:43,635 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/f14d455ff3b60546f0a651dc8cf12d5c/.tmp/C/3e83c066be8941ba93652fb62b5ec4c7 is 50, key is test_row_0/C:col10/1734371922194/Put/seqid=0 2024-12-16T17:58:43,638 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41817 is added to blk_1073742345_1521 (size=12301) 2024-12-16T17:58:43,687 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38367 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=132 2024-12-16T17:58:43,693 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 3609ad07831c,39733,1734371789085 2024-12-16T17:58:43,694 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=39733 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=133 2024-12-16T17:58:43,694 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-0 {event_type=RS_FLUSH_REGIONS, pid=133}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1734371907068.f14d455ff3b60546f0a651dc8cf12d5c. 2024-12-16T17:58:43,694 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-0 {event_type=RS_FLUSH_REGIONS, pid=133}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1734371907068.f14d455ff3b60546f0a651dc8cf12d5c. as already flushing 2024-12-16T17:58:43,694 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-0 {event_type=RS_FLUSH_REGIONS, pid=133}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1734371907068.f14d455ff3b60546f0a651dc8cf12d5c. 2024-12-16T17:58:43,694 ERROR [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-0 {event_type=RS_FLUSH_REGIONS, pid=133}] handler.RSProcedureHandler(58): pid=133 java.io.IOException: Unable to complete flush {ENCODED => f14d455ff3b60546f0a651dc8cf12d5c, NAME => 'TestAcidGuarantees,,1734371907068.f14d455ff3b60546f0a651dc8cf12d5c.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-16T17:58:43,694 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-0 {event_type=RS_FLUSH_REGIONS, pid=133}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=133 java.io.IOException: Unable to complete flush {ENCODED => f14d455ff3b60546f0a651dc8cf12d5c, NAME => 'TestAcidGuarantees,,1734371907068.f14d455ff3b60546f0a651dc8cf12d5c.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-16T17:58:43,694 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38367 {}] master.HMaster(4114): Remote procedure failed, pid=133 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => f14d455ff3b60546f0a651dc8cf12d5c, NAME => 'TestAcidGuarantees,,1734371907068.f14d455ff3b60546f0a651dc8cf12d5c.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => f14d455ff3b60546f0a651dc8cf12d5c, NAME => 'TestAcidGuarantees,,1734371907068.f14d455ff3b60546f0a651dc8cf12d5c.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-16T17:58:43,846 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 3609ad07831c,39733,1734371789085 2024-12-16T17:58:43,846 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=39733 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=133 2024-12-16T17:58:43,846 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-1 {event_type=RS_FLUSH_REGIONS, pid=133}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1734371907068.f14d455ff3b60546f0a651dc8cf12d5c. 2024-12-16T17:58:43,846 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-1 {event_type=RS_FLUSH_REGIONS, pid=133}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1734371907068.f14d455ff3b60546f0a651dc8cf12d5c. as already flushing 2024-12-16T17:58:43,846 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-1 {event_type=RS_FLUSH_REGIONS, pid=133}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1734371907068.f14d455ff3b60546f0a651dc8cf12d5c. 2024-12-16T17:58:43,846 ERROR [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-1 {event_type=RS_FLUSH_REGIONS, pid=133}] handler.RSProcedureHandler(58): pid=133 java.io.IOException: Unable to complete flush {ENCODED => f14d455ff3b60546f0a651dc8cf12d5c, NAME => 'TestAcidGuarantees,,1734371907068.f14d455ff3b60546f0a651dc8cf12d5c.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-16T17:58:43,846 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-1 {event_type=RS_FLUSH_REGIONS, pid=133}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=133 java.io.IOException: Unable to complete flush {ENCODED => f14d455ff3b60546f0a651dc8cf12d5c, NAME => 'TestAcidGuarantees,,1734371907068.f14d455ff3b60546f0a651dc8cf12d5c.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-16T17:58:43,847 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38367 {}] master.HMaster(4114): Remote procedure failed, pid=133 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => f14d455ff3b60546f0a651dc8cf12d5c, NAME => 'TestAcidGuarantees,,1734371907068.f14d455ff3b60546f0a651dc8cf12d5c.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => f14d455ff3b60546f0a651dc8cf12d5c, NAME => 'TestAcidGuarantees,,1734371907068.f14d455ff3b60546f0a651dc8cf12d5c.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-16T17:58:43,936 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39733 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f14d455ff3b60546f0a651dc8cf12d5c, server=3609ad07831c,39733,1734371789085 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-16T17:58:43,936 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39733 {}] ipc.CallRunner(138): callId: 159 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35424 deadline: 1734371983936, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f14d455ff3b60546f0a651dc8cf12d5c, server=3609ad07831c,39733,1734371789085 2024-12-16T17:58:43,938 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39733 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f14d455ff3b60546f0a651dc8cf12d5c, server=3609ad07831c,39733,1734371789085 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-16T17:58:43,938 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39733 {}] ipc.CallRunner(138): callId: 158 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35488 deadline: 1734371983936, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f14d455ff3b60546f0a651dc8cf12d5c, server=3609ad07831c,39733,1734371789085 2024-12-16T17:58:43,942 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39733 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f14d455ff3b60546f0a651dc8cf12d5c, server=3609ad07831c,39733,1734371789085 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-16T17:58:43,942 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39733 {}] ipc.CallRunner(138): callId: 154 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35486 deadline: 1734371983939, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f14d455ff3b60546f0a651dc8cf12d5c, server=3609ad07831c,39733,1734371789085 2024-12-16T17:58:43,998 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 3609ad07831c,39733,1734371789085 2024-12-16T17:58:43,998 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=39733 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=133 2024-12-16T17:58:43,999 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-2 {event_type=RS_FLUSH_REGIONS, pid=133}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1734371907068.f14d455ff3b60546f0a651dc8cf12d5c. 2024-12-16T17:58:43,999 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-2 {event_type=RS_FLUSH_REGIONS, pid=133}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1734371907068.f14d455ff3b60546f0a651dc8cf12d5c. as already flushing 2024-12-16T17:58:43,999 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-2 {event_type=RS_FLUSH_REGIONS, pid=133}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1734371907068.f14d455ff3b60546f0a651dc8cf12d5c. 2024-12-16T17:58:43,999 ERROR [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-2 {event_type=RS_FLUSH_REGIONS, pid=133}] handler.RSProcedureHandler(58): pid=133 java.io.IOException: Unable to complete flush {ENCODED => f14d455ff3b60546f0a651dc8cf12d5c, NAME => 'TestAcidGuarantees,,1734371907068.f14d455ff3b60546f0a651dc8cf12d5c.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] 
at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-16T17:58:43,999 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-2 {event_type=RS_FLUSH_REGIONS, pid=133}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=133 java.io.IOException: Unable to complete flush {ENCODED => f14d455ff3b60546f0a651dc8cf12d5c, NAME => 'TestAcidGuarantees,,1734371907068.f14d455ff3b60546f0a651dc8cf12d5c.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-16T17:58:43,999 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38367 {}] master.HMaster(4114): Remote procedure failed, pid=133 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => f14d455ff3b60546f0a651dc8cf12d5c, NAME => 'TestAcidGuarantees,,1734371907068.f14d455ff3b60546f0a651dc8cf12d5c.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => f14d455ff3b60546f0a651dc8cf12d5c, NAME => 'TestAcidGuarantees,,1734371907068.f14d455ff3b60546f0a651dc8cf12d5c.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-16T17:58:44,039 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=60.38 KB at sequenceid=332 (bloomFilter=true), to=hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/f14d455ff3b60546f0a651dc8cf12d5c/.tmp/C/3e83c066be8941ba93652fb62b5ec4c7 2024-12-16T17:58:44,042 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/f14d455ff3b60546f0a651dc8cf12d5c/.tmp/A/1270b36cf6b9446ba16918ef6a9f0be5 as hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/f14d455ff3b60546f0a651dc8cf12d5c/A/1270b36cf6b9446ba16918ef6a9f0be5 2024-12-16T17:58:44,045 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/f14d455ff3b60546f0a651dc8cf12d5c/A/1270b36cf6b9446ba16918ef6a9f0be5, entries=200, sequenceid=332, filesize=14.4 K 2024-12-16T17:58:44,045 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/f14d455ff3b60546f0a651dc8cf12d5c/.tmp/B/89993d51fc5d4936864381238aab5e3c as hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/f14d455ff3b60546f0a651dc8cf12d5c/B/89993d51fc5d4936864381238aab5e3c 2024-12-16T17:58:44,048 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/f14d455ff3b60546f0a651dc8cf12d5c/B/89993d51fc5d4936864381238aab5e3c, entries=150, sequenceid=332, filesize=12.0 K 2024-12-16T17:58:44,048 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/f14d455ff3b60546f0a651dc8cf12d5c/.tmp/C/3e83c066be8941ba93652fb62b5ec4c7 as hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/f14d455ff3b60546f0a651dc8cf12d5c/C/3e83c066be8941ba93652fb62b5ec4c7 2024-12-16T17:58:44,051 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/f14d455ff3b60546f0a651dc8cf12d5c/C/3e83c066be8941ba93652fb62b5ec4c7, entries=150, sequenceid=332, filesize=12.0 K 2024-12-16T17:58:44,051 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished 
flush of dataSize ~181.14 KB/185490, heapSize ~475.31 KB/486720, currentSize=20.13 KB/20610 for f14d455ff3b60546f0a651dc8cf12d5c in 1237ms, sequenceid=332, compaction requested=true 2024-12-16T17:58:44,052 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for f14d455ff3b60546f0a651dc8cf12d5c: 2024-12-16T17:58:44,052 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store f14d455ff3b60546f0a651dc8cf12d5c:A, priority=-2147483648, current under compaction store size is 1 2024-12-16T17:58:44,052 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-16T17:58:44,052 DEBUG [RS:0;3609ad07831c:39733-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-16T17:58:44,052 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store f14d455ff3b60546f0a651dc8cf12d5c:B, priority=-2147483648, current under compaction store size is 2 2024-12-16T17:58:44,052 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-16T17:58:44,052 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store f14d455ff3b60546f0a651dc8cf12d5c:C, priority=-2147483648, current under compaction store size is 3 2024-12-16T17:58:44,052 DEBUG [RS:0;3609ad07831c:39733-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-16T17:58:44,052 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-16T17:58:44,053 DEBUG [RS:0;3609ad07831c:39733-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 37585 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-16T17:58:44,053 DEBUG [RS:0;3609ad07831c:39733-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 40025 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-16T17:58:44,053 DEBUG [RS:0;3609ad07831c:39733-longCompactions-0 {}] regionserver.HStore(1540): f14d455ff3b60546f0a651dc8cf12d5c/B is initiating minor compaction (all files) 2024-12-16T17:58:44,053 DEBUG [RS:0;3609ad07831c:39733-shortCompactions-0 {}] regionserver.HStore(1540): f14d455ff3b60546f0a651dc8cf12d5c/A is initiating minor compaction (all files) 2024-12-16T17:58:44,053 INFO [RS:0;3609ad07831c:39733-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of f14d455ff3b60546f0a651dc8cf12d5c/A in TestAcidGuarantees,,1734371907068.f14d455ff3b60546f0a651dc8cf12d5c. 2024-12-16T17:58:44,053 INFO [RS:0;3609ad07831c:39733-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of f14d455ff3b60546f0a651dc8cf12d5c/B in TestAcidGuarantees,,1734371907068.f14d455ff3b60546f0a651dc8cf12d5c. 
2024-12-16T17:58:44,053 INFO [RS:0;3609ad07831c:39733-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/f14d455ff3b60546f0a651dc8cf12d5c/B/87bda6a8c10b44bebe17b914aa84c49f, hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/f14d455ff3b60546f0a651dc8cf12d5c/B/343a9b076d724e87bcba2be581ed02be, hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/f14d455ff3b60546f0a651dc8cf12d5c/B/89993d51fc5d4936864381238aab5e3c] into tmpdir=hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/f14d455ff3b60546f0a651dc8cf12d5c/.tmp, totalSize=36.7 K 2024-12-16T17:58:44,053 INFO [RS:0;3609ad07831c:39733-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/f14d455ff3b60546f0a651dc8cf12d5c/A/a8593e9aefd04061b4d17fc7b97cdc0f, hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/f14d455ff3b60546f0a651dc8cf12d5c/A/66b4830aac3f491993c3f3568d8766a4, hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/f14d455ff3b60546f0a651dc8cf12d5c/A/1270b36cf6b9446ba16918ef6a9f0be5] into tmpdir=hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/f14d455ff3b60546f0a651dc8cf12d5c/.tmp, totalSize=39.1 K 2024-12-16T17:58:44,053 DEBUG [RS:0;3609ad07831c:39733-shortCompactions-0 {}] compactions.Compactor(224): Compacting a8593e9aefd04061b4d17fc7b97cdc0f, keycount=150, bloomtype=ROW, size=12.7 K, encoding=NONE, compression=NONE, seqNum=291, earliestPutTs=1734371920698 2024-12-16T17:58:44,053 DEBUG [RS:0;3609ad07831c:39733-longCompactions-0 {}] compactions.Compactor(224): Compacting 87bda6a8c10b44bebe17b914aa84c49f, keycount=150, bloomtype=ROW, size=12.7 K, encoding=NONE, compression=NONE, seqNum=291, earliestPutTs=1734371920698 2024-12-16T17:58:44,053 DEBUG [RS:0;3609ad07831c:39733-shortCompactions-0 {}] compactions.Compactor(224): Compacting 66b4830aac3f491993c3f3568d8766a4, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=299, earliestPutTs=1734371921018 2024-12-16T17:58:44,054 DEBUG [RS:0;3609ad07831c:39733-longCompactions-0 {}] compactions.Compactor(224): Compacting 343a9b076d724e87bcba2be581ed02be, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=299, earliestPutTs=1734371921018 2024-12-16T17:58:44,058 DEBUG [RS:0;3609ad07831c:39733-longCompactions-0 {}] compactions.Compactor(224): Compacting 89993d51fc5d4936864381238aab5e3c, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=332, earliestPutTs=1734371922193 2024-12-16T17:58:44,058 DEBUG [RS:0;3609ad07831c:39733-shortCompactions-0 {}] compactions.Compactor(224): Compacting 1270b36cf6b9446ba16918ef6a9f0be5, keycount=200, bloomtype=ROW, size=14.4 K, encoding=NONE, compression=NONE, seqNum=332, earliestPutTs=1734371922177 2024-12-16T17:58:44,065 INFO [RS:0;3609ad07831c:39733-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): f14d455ff3b60546f0a651dc8cf12d5c#A#compaction#447 average throughput is 6.55 MB/second, slept 0 time(s) and 
total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-12-16T17:58:44,065 DEBUG [RS:0;3609ad07831c:39733-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/f14d455ff3b60546f0a651dc8cf12d5c/.tmp/A/9e39f8f1e1f04627ae5423d1de418265 is 50, key is test_row_0/A:col10/1734371922194/Put/seqid=0 2024-12-16T17:58:44,070 INFO [RS:0;3609ad07831c:39733-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): f14d455ff3b60546f0a651dc8cf12d5c#B#compaction#448 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-12-16T17:58:44,070 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41817 is added to blk_1073742346_1522 (size=13085) 2024-12-16T17:58:44,071 DEBUG [RS:0;3609ad07831c:39733-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/f14d455ff3b60546f0a651dc8cf12d5c/.tmp/B/e6b63d52fd164194be39de80cc15fbae is 50, key is test_row_0/B:col10/1734371922194/Put/seqid=0 2024-12-16T17:58:44,076 DEBUG [RS:0;3609ad07831c:39733-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/f14d455ff3b60546f0a651dc8cf12d5c/.tmp/A/9e39f8f1e1f04627ae5423d1de418265 as hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/f14d455ff3b60546f0a651dc8cf12d5c/A/9e39f8f1e1f04627ae5423d1de418265 2024-12-16T17:58:44,076 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41817 is added to blk_1073742347_1523 (size=13085) 2024-12-16T17:58:44,081 INFO [RS:0;3609ad07831c:39733-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in f14d455ff3b60546f0a651dc8cf12d5c/A of f14d455ff3b60546f0a651dc8cf12d5c into 9e39f8f1e1f04627ae5423d1de418265(size=12.8 K), total size for store is 12.8 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-12-16T17:58:44,082 DEBUG [RS:0;3609ad07831c:39733-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/f14d455ff3b60546f0a651dc8cf12d5c/.tmp/B/e6b63d52fd164194be39de80cc15fbae as hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/f14d455ff3b60546f0a651dc8cf12d5c/B/e6b63d52fd164194be39de80cc15fbae 2024-12-16T17:58:44,082 DEBUG [RS:0;3609ad07831c:39733-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for f14d455ff3b60546f0a651dc8cf12d5c: 2024-12-16T17:58:44,082 INFO [RS:0;3609ad07831c:39733-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1734371907068.f14d455ff3b60546f0a651dc8cf12d5c., storeName=f14d455ff3b60546f0a651dc8cf12d5c/A, priority=13, startTime=1734371924052; duration=0sec 2024-12-16T17:58:44,082 DEBUG [RS:0;3609ad07831c:39733-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-16T17:58:44,082 DEBUG [RS:0;3609ad07831c:39733-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: f14d455ff3b60546f0a651dc8cf12d5c:A 2024-12-16T17:58:44,082 DEBUG [RS:0;3609ad07831c:39733-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-16T17:58:44,083 DEBUG [RS:0;3609ad07831c:39733-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 37585 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-16T17:58:44,083 DEBUG [RS:0;3609ad07831c:39733-shortCompactions-0 {}] regionserver.HStore(1540): f14d455ff3b60546f0a651dc8cf12d5c/C is initiating minor compaction (all files) 2024-12-16T17:58:44,083 INFO [RS:0;3609ad07831c:39733-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of f14d455ff3b60546f0a651dc8cf12d5c/C in TestAcidGuarantees,,1734371907068.f14d455ff3b60546f0a651dc8cf12d5c. 
2024-12-16T17:58:44,083 INFO [RS:0;3609ad07831c:39733-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/f14d455ff3b60546f0a651dc8cf12d5c/C/e944ccb291a248f695c61f3b29f87a96, hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/f14d455ff3b60546f0a651dc8cf12d5c/C/f8e1dbb4f81644d49a8a3d3d16f214b9, hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/f14d455ff3b60546f0a651dc8cf12d5c/C/3e83c066be8941ba93652fb62b5ec4c7] into tmpdir=hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/f14d455ff3b60546f0a651dc8cf12d5c/.tmp, totalSize=36.7 K 2024-12-16T17:58:44,084 DEBUG [RS:0;3609ad07831c:39733-shortCompactions-0 {}] compactions.Compactor(224): Compacting e944ccb291a248f695c61f3b29f87a96, keycount=150, bloomtype=ROW, size=12.7 K, encoding=NONE, compression=NONE, seqNum=291, earliestPutTs=1734371920698 2024-12-16T17:58:44,084 DEBUG [RS:0;3609ad07831c:39733-shortCompactions-0 {}] compactions.Compactor(224): Compacting f8e1dbb4f81644d49a8a3d3d16f214b9, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=299, earliestPutTs=1734371921018 2024-12-16T17:58:44,085 DEBUG [RS:0;3609ad07831c:39733-shortCompactions-0 {}] compactions.Compactor(224): Compacting 3e83c066be8941ba93652fb62b5ec4c7, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=332, earliestPutTs=1734371922193 2024-12-16T17:58:44,087 INFO [RS:0;3609ad07831c:39733-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in f14d455ff3b60546f0a651dc8cf12d5c/B of f14d455ff3b60546f0a651dc8cf12d5c into e6b63d52fd164194be39de80cc15fbae(size=12.8 K), total size for store is 12.8 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-12-16T17:58:44,087 DEBUG [RS:0;3609ad07831c:39733-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for f14d455ff3b60546f0a651dc8cf12d5c: 2024-12-16T17:58:44,087 INFO [RS:0;3609ad07831c:39733-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1734371907068.f14d455ff3b60546f0a651dc8cf12d5c., storeName=f14d455ff3b60546f0a651dc8cf12d5c/B, priority=13, startTime=1734371924052; duration=0sec 2024-12-16T17:58:44,088 DEBUG [RS:0;3609ad07831c:39733-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-16T17:58:44,088 DEBUG [RS:0;3609ad07831c:39733-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: f14d455ff3b60546f0a651dc8cf12d5c:B 2024-12-16T17:58:44,092 INFO [RS:0;3609ad07831c:39733-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): f14d455ff3b60546f0a651dc8cf12d5c#C#compaction#449 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-12-16T17:58:44,092 DEBUG [RS:0;3609ad07831c:39733-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/f14d455ff3b60546f0a651dc8cf12d5c/.tmp/C/f9b065df24cc4a0dbb4b1dab90fa9126 is 50, key is test_row_0/C:col10/1734371922194/Put/seqid=0 2024-12-16T17:58:44,098 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41817 is added to blk_1073742348_1524 (size=13085) 2024-12-16T17:58:44,104 DEBUG [RS:0;3609ad07831c:39733-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/f14d455ff3b60546f0a651dc8cf12d5c/.tmp/C/f9b065df24cc4a0dbb4b1dab90fa9126 as hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/f14d455ff3b60546f0a651dc8cf12d5c/C/f9b065df24cc4a0dbb4b1dab90fa9126 2024-12-16T17:58:44,109 INFO [RS:0;3609ad07831c:39733-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in f14d455ff3b60546f0a651dc8cf12d5c/C of f14d455ff3b60546f0a651dc8cf12d5c into f9b065df24cc4a0dbb4b1dab90fa9126(size=12.8 K), total size for store is 12.8 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-12-16T17:58:44,109 DEBUG [RS:0;3609ad07831c:39733-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for f14d455ff3b60546f0a651dc8cf12d5c: 2024-12-16T17:58:44,109 INFO [RS:0;3609ad07831c:39733-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1734371907068.f14d455ff3b60546f0a651dc8cf12d5c., storeName=f14d455ff3b60546f0a651dc8cf12d5c/C, priority=13, startTime=1734371924052; duration=0sec 2024-12-16T17:58:44,109 DEBUG [RS:0;3609ad07831c:39733-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-16T17:58:44,109 DEBUG [RS:0;3609ad07831c:39733-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: f14d455ff3b60546f0a651dc8cf12d5c:C 2024-12-16T17:58:44,150 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 3609ad07831c,39733,1734371789085 2024-12-16T17:58:44,151 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=39733 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=133 2024-12-16T17:58:44,151 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-0 {event_type=RS_FLUSH_REGIONS, pid=133}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1734371907068.f14d455ff3b60546f0a651dc8cf12d5c. 
2024-12-16T17:58:44,151 INFO [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-0 {event_type=RS_FLUSH_REGIONS, pid=133}] regionserver.HRegion(2837): Flushing f14d455ff3b60546f0a651dc8cf12d5c 3/3 column families, dataSize=20.13 KB heapSize=53.48 KB 2024-12-16T17:58:44,151 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-0 {event_type=RS_FLUSH_REGIONS, pid=133}] regionserver.CompactingMemStore(205): FLUSHING TO DISK f14d455ff3b60546f0a651dc8cf12d5c, store=A 2024-12-16T17:58:44,152 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-0 {event_type=RS_FLUSH_REGIONS, pid=133}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-16T17:58:44,152 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-0 {event_type=RS_FLUSH_REGIONS, pid=133}] regionserver.CompactingMemStore(205): FLUSHING TO DISK f14d455ff3b60546f0a651dc8cf12d5c, store=B 2024-12-16T17:58:44,152 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-0 {event_type=RS_FLUSH_REGIONS, pid=133}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-16T17:58:44,152 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-0 {event_type=RS_FLUSH_REGIONS, pid=133}] regionserver.CompactingMemStore(205): FLUSHING TO DISK f14d455ff3b60546f0a651dc8cf12d5c, store=C 2024-12-16T17:58:44,152 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-0 {event_type=RS_FLUSH_REGIONS, pid=133}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-16T17:58:44,155 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-0 {event_type=RS_FLUSH_REGIONS, pid=133}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/f14d455ff3b60546f0a651dc8cf12d5c/.tmp/A/ed164e21d28b40b18d2d9066c95e053e is 50, key is test_row_0/A:col10/1734371922814/Put/seqid=0 2024-12-16T17:58:44,162 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41817 is added to blk_1073742349_1525 (size=9857) 2024-12-16T17:58:44,188 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38367 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=132 2024-12-16T17:58:44,563 INFO [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-0 {event_type=RS_FLUSH_REGIONS, pid=133}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=6.71 KB at sequenceid=341 (bloomFilter=true), to=hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/f14d455ff3b60546f0a651dc8cf12d5c/.tmp/A/ed164e21d28b40b18d2d9066c95e053e 2024-12-16T17:58:44,568 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-0 {event_type=RS_FLUSH_REGIONS, pid=133}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/f14d455ff3b60546f0a651dc8cf12d5c/.tmp/B/740efa9e595348f1bdd3e1d81dd33b26 is 50, key is test_row_0/B:col10/1734371922814/Put/seqid=0 2024-12-16T17:58:44,571 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41817 is added to blk_1073742350_1526 (size=9857) 2024-12-16T17:58:44,857 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1734371907068.f14d455ff3b60546f0a651dc8cf12d5c. 
as already flushing 2024-12-16T17:58:44,857 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39733 {}] regionserver.HRegion(8581): Flush requested on f14d455ff3b60546f0a651dc8cf12d5c 2024-12-16T17:58:44,928 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39733 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f14d455ff3b60546f0a651dc8cf12d5c, server=3609ad07831c,39733,1734371789085 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-16T17:58:44,929 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39733 {}] ipc.CallRunner(138): callId: 161 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35482 deadline: 1734371984926, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f14d455ff3b60546f0a651dc8cf12d5c, server=3609ad07831c,39733,1734371789085 2024-12-16T17:58:44,938 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39733 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f14d455ff3b60546f0a651dc8cf12d5c, server=3609ad07831c,39733,1734371789085 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-16T17:58:44,938 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39733 {}] ipc.CallRunner(138): callId: 161 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35424 deadline: 1734371984937, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f14d455ff3b60546f0a651dc8cf12d5c, server=3609ad07831c,39733,1734371789085 2024-12-16T17:58:44,944 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39733 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f14d455ff3b60546f0a651dc8cf12d5c, server=3609ad07831c,39733,1734371789085 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-16T17:58:44,945 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39733 {}] ipc.CallRunner(138): callId: 160 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35488 deadline: 1734371984942, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f14d455ff3b60546f0a651dc8cf12d5c, server=3609ad07831c,39733,1734371789085 2024-12-16T17:58:44,951 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39733 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f14d455ff3b60546f0a651dc8cf12d5c, server=3609ad07831c,39733,1734371789085 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-16T17:58:44,952 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39733 {}] ipc.CallRunner(138): callId: 156 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35486 deadline: 1734371984950, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f14d455ff3b60546f0a651dc8cf12d5c, server=3609ad07831c,39733,1734371789085 2024-12-16T17:58:44,972 INFO [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-0 {event_type=RS_FLUSH_REGIONS, pid=133}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=6.71 KB at sequenceid=341 (bloomFilter=true), to=hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/f14d455ff3b60546f0a651dc8cf12d5c/.tmp/B/740efa9e595348f1bdd3e1d81dd33b26 2024-12-16T17:58:44,978 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-0 {event_type=RS_FLUSH_REGIONS, pid=133}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/f14d455ff3b60546f0a651dc8cf12d5c/.tmp/C/d9c1c0b9f2054749b066ec4aefa4f7ca is 50, key is test_row_0/C:col10/1734371922814/Put/seqid=0 2024-12-16T17:58:44,981 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41817 is added to blk_1073742351_1527 (size=9857) 2024-12-16T17:58:45,031 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39733 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f14d455ff3b60546f0a651dc8cf12d5c, server=3609ad07831c,39733,1734371789085 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-16T17:58:45,031 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39733 {}] ipc.CallRunner(138): callId: 163 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35482 deadline: 1734371985029, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f14d455ff3b60546f0a651dc8cf12d5c, server=3609ad07831c,39733,1734371789085 2024-12-16T17:58:45,188 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38367 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=132 2024-12-16T17:58:45,234 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39733 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f14d455ff3b60546f0a651dc8cf12d5c, server=3609ad07831c,39733,1734371789085 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-16T17:58:45,234 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39733 {}] ipc.CallRunner(138): callId: 165 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35482 deadline: 1734371985233, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f14d455ff3b60546f0a651dc8cf12d5c, server=3609ad07831c,39733,1734371789085 2024-12-16T17:58:45,381 INFO [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-0 {event_type=RS_FLUSH_REGIONS, pid=133}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=6.71 KB at sequenceid=341 (bloomFilter=true), to=hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/f14d455ff3b60546f0a651dc8cf12d5c/.tmp/C/d9c1c0b9f2054749b066ec4aefa4f7ca 2024-12-16T17:58:45,384 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-0 {event_type=RS_FLUSH_REGIONS, pid=133}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/f14d455ff3b60546f0a651dc8cf12d5c/.tmp/A/ed164e21d28b40b18d2d9066c95e053e as hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/f14d455ff3b60546f0a651dc8cf12d5c/A/ed164e21d28b40b18d2d9066c95e053e 2024-12-16T17:58:45,387 INFO [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-0 {event_type=RS_FLUSH_REGIONS, pid=133}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/f14d455ff3b60546f0a651dc8cf12d5c/A/ed164e21d28b40b18d2d9066c95e053e, entries=100, sequenceid=341, filesize=9.6 K 2024-12-16T17:58:45,388 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-0 {event_type=RS_FLUSH_REGIONS, pid=133}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/f14d455ff3b60546f0a651dc8cf12d5c/.tmp/B/740efa9e595348f1bdd3e1d81dd33b26 as hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/f14d455ff3b60546f0a651dc8cf12d5c/B/740efa9e595348f1bdd3e1d81dd33b26 2024-12-16T17:58:45,390 INFO [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-0 {event_type=RS_FLUSH_REGIONS, pid=133}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/f14d455ff3b60546f0a651dc8cf12d5c/B/740efa9e595348f1bdd3e1d81dd33b26, entries=100, sequenceid=341, filesize=9.6 K 2024-12-16T17:58:45,390 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-0 {event_type=RS_FLUSH_REGIONS, pid=133}] regionserver.HRegionFileSystem(442): Committing 
hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/f14d455ff3b60546f0a651dc8cf12d5c/.tmp/C/d9c1c0b9f2054749b066ec4aefa4f7ca as hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/f14d455ff3b60546f0a651dc8cf12d5c/C/d9c1c0b9f2054749b066ec4aefa4f7ca 2024-12-16T17:58:45,393 INFO [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-0 {event_type=RS_FLUSH_REGIONS, pid=133}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/f14d455ff3b60546f0a651dc8cf12d5c/C/d9c1c0b9f2054749b066ec4aefa4f7ca, entries=100, sequenceid=341, filesize=9.6 K 2024-12-16T17:58:45,394 INFO [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-0 {event_type=RS_FLUSH_REGIONS, pid=133}] regionserver.HRegion(3040): Finished flush of dataSize ~20.13 KB/20610, heapSize ~53.44 KB/54720, currentSize=181.14 KB/185490 for f14d455ff3b60546f0a651dc8cf12d5c in 1242ms, sequenceid=341, compaction requested=false 2024-12-16T17:58:45,394 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-0 {event_type=RS_FLUSH_REGIONS, pid=133}] regionserver.HRegion(2538): Flush status journal for f14d455ff3b60546f0a651dc8cf12d5c: 2024-12-16T17:58:45,394 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-0 {event_type=RS_FLUSH_REGIONS, pid=133}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1734371907068.f14d455ff3b60546f0a651dc8cf12d5c. 2024-12-16T17:58:45,394 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-0 {event_type=RS_FLUSH_REGIONS, pid=133}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=133 2024-12-16T17:58:45,394 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38367 {}] master.HMaster(4106): Remote procedure done, pid=133 2024-12-16T17:58:45,395 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=133, resume processing ppid=132 2024-12-16T17:58:45,395 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=133, ppid=132, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 2.3090 sec 2024-12-16T17:58:45,396 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=132, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=132, table=TestAcidGuarantees in 2.3120 sec 2024-12-16T17:58:45,469 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39733 {}] regionserver.HRegion(8581): Flush requested on f14d455ff3b60546f0a651dc8cf12d5c 2024-12-16T17:58:45,470 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing f14d455ff3b60546f0a651dc8cf12d5c 3/3 column families, dataSize=187.85 KB heapSize=492.94 KB 2024-12-16T17:58:45,470 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK f14d455ff3b60546f0a651dc8cf12d5c, store=A 2024-12-16T17:58:45,470 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-16T17:58:45,470 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK f14d455ff3b60546f0a651dc8cf12d5c, store=B 2024-12-16T17:58:45,470 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-16T17:58:45,470 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 
f14d455ff3b60546f0a651dc8cf12d5c, store=C 2024-12-16T17:58:45,470 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-16T17:58:45,474 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/f14d455ff3b60546f0a651dc8cf12d5c/.tmp/A/21f3dd0101634b5c83ce3f9b8f08cf01 is 50, key is test_row_0/A:col10/1734371925469/Put/seqid=0 2024-12-16T17:58:45,478 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39733 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f14d455ff3b60546f0a651dc8cf12d5c, server=3609ad07831c,39733,1734371789085 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-16T17:58:45,478 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39733 {}] ipc.CallRunner(138): callId: 111 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35480 deadline: 1734371985477, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f14d455ff3b60546f0a651dc8cf12d5c, server=3609ad07831c,39733,1734371789085 2024-12-16T17:58:45,481 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41817 is added to blk_1073742352_1528 (size=12301) 2024-12-16T17:58:45,538 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39733 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f14d455ff3b60546f0a651dc8cf12d5c, server=3609ad07831c,39733,1734371789085 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-16T17:58:45,538 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39733 {}] ipc.CallRunner(138): callId: 167 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35482 deadline: 1734371985537, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f14d455ff3b60546f0a651dc8cf12d5c, server=3609ad07831c,39733,1734371789085 2024-12-16T17:58:45,583 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39733 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f14d455ff3b60546f0a651dc8cf12d5c, server=3609ad07831c,39733,1734371789085 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-16T17:58:45,583 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39733 {}] ipc.CallRunner(138): callId: 113 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35480 deadline: 1734371985579, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f14d455ff3b60546f0a651dc8cf12d5c, server=3609ad07831c,39733,1734371789085 2024-12-16T17:58:45,786 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39733 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f14d455ff3b60546f0a651dc8cf12d5c, server=3609ad07831c,39733,1734371789085 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-16T17:58:45,786 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39733 {}] ipc.CallRunner(138): callId: 115 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35480 deadline: 1734371985785, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f14d455ff3b60546f0a651dc8cf12d5c, server=3609ad07831c,39733,1734371789085 2024-12-16T17:58:45,882 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=62.62 KB at sequenceid=372 (bloomFilter=true), to=hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/f14d455ff3b60546f0a651dc8cf12d5c/.tmp/A/21f3dd0101634b5c83ce3f9b8f08cf01 2024-12-16T17:58:45,888 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/f14d455ff3b60546f0a651dc8cf12d5c/.tmp/B/85077b1238da44f5adfc717bfb4b6db8 is 50, key is test_row_0/B:col10/1734371925469/Put/seqid=0 2024-12-16T17:58:45,890 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41817 is added to blk_1073742353_1529 (size=12301) 2024-12-16T17:58:45,891 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=62.62 KB at sequenceid=372 (bloomFilter=true), to=hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/f14d455ff3b60546f0a651dc8cf12d5c/.tmp/B/85077b1238da44f5adfc717bfb4b6db8 2024-12-16T17:58:45,897 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/f14d455ff3b60546f0a651dc8cf12d5c/.tmp/C/087c8bcfadee46b0b661e9a0138fee74 is 50, key is test_row_0/C:col10/1734371925469/Put/seqid=0 2024-12-16T17:58:45,899 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41817 is added to blk_1073742354_1530 (size=12301) 2024-12-16T17:58:46,043 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39733 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f14d455ff3b60546f0a651dc8cf12d5c, server=3609ad07831c,39733,1734371789085 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-16T17:58:46,044 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39733 {}] ipc.CallRunner(138): callId: 169 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35482 deadline: 1734371986042, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f14d455ff3b60546f0a651dc8cf12d5c, server=3609ad07831c,39733,1734371789085 2024-12-16T17:58:46,087 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39733 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f14d455ff3b60546f0a651dc8cf12d5c, server=3609ad07831c,39733,1734371789085 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-16T17:58:46,088 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39733 {}] ipc.CallRunner(138): callId: 117 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35480 deadline: 1734371986087, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f14d455ff3b60546f0a651dc8cf12d5c, server=3609ad07831c,39733,1734371789085 2024-12-16T17:58:46,300 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=62.62 KB at sequenceid=372 (bloomFilter=true), to=hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/f14d455ff3b60546f0a651dc8cf12d5c/.tmp/C/087c8bcfadee46b0b661e9a0138fee74 2024-12-16T17:58:46,303 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/f14d455ff3b60546f0a651dc8cf12d5c/.tmp/A/21f3dd0101634b5c83ce3f9b8f08cf01 as hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/f14d455ff3b60546f0a651dc8cf12d5c/A/21f3dd0101634b5c83ce3f9b8f08cf01 2024-12-16T17:58:46,306 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/f14d455ff3b60546f0a651dc8cf12d5c/A/21f3dd0101634b5c83ce3f9b8f08cf01, entries=150, sequenceid=372, filesize=12.0 K 2024-12-16T17:58:46,306 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/f14d455ff3b60546f0a651dc8cf12d5c/.tmp/B/85077b1238da44f5adfc717bfb4b6db8 as hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/f14d455ff3b60546f0a651dc8cf12d5c/B/85077b1238da44f5adfc717bfb4b6db8 2024-12-16T17:58:46,309 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/f14d455ff3b60546f0a651dc8cf12d5c/B/85077b1238da44f5adfc717bfb4b6db8, entries=150, sequenceid=372, filesize=12.0 K 2024-12-16T17:58:46,310 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/f14d455ff3b60546f0a651dc8cf12d5c/.tmp/C/087c8bcfadee46b0b661e9a0138fee74 as hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/f14d455ff3b60546f0a651dc8cf12d5c/C/087c8bcfadee46b0b661e9a0138fee74 2024-12-16T17:58:46,313 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added 
hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/f14d455ff3b60546f0a651dc8cf12d5c/C/087c8bcfadee46b0b661e9a0138fee74, entries=150, sequenceid=372, filesize=12.0 K 2024-12-16T17:58:46,313 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~187.85 KB/192360, heapSize ~492.89 KB/504720, currentSize=13.42 KB/13740 for f14d455ff3b60546f0a651dc8cf12d5c in 843ms, sequenceid=372, compaction requested=true 2024-12-16T17:58:46,314 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for f14d455ff3b60546f0a651dc8cf12d5c: 2024-12-16T17:58:46,314 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store f14d455ff3b60546f0a651dc8cf12d5c:A, priority=-2147483648, current under compaction store size is 1 2024-12-16T17:58:46,314 DEBUG [RS:0;3609ad07831c:39733-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-16T17:58:46,314 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-16T17:58:46,314 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store f14d455ff3b60546f0a651dc8cf12d5c:B, priority=-2147483648, current under compaction store size is 2 2024-12-16T17:58:46,314 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-16T17:58:46,314 DEBUG [RS:0;3609ad07831c:39733-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-16T17:58:46,314 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store f14d455ff3b60546f0a651dc8cf12d5c:C, priority=-2147483648, current under compaction store size is 3 2024-12-16T17:58:46,314 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-16T17:58:46,314 DEBUG [RS:0;3609ad07831c:39733-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 35243 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-16T17:58:46,314 DEBUG [RS:0;3609ad07831c:39733-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 35243 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-16T17:58:46,314 DEBUG [RS:0;3609ad07831c:39733-shortCompactions-0 {}] regionserver.HStore(1540): f14d455ff3b60546f0a651dc8cf12d5c/A is initiating minor compaction (all files) 2024-12-16T17:58:46,314 DEBUG [RS:0;3609ad07831c:39733-longCompactions-0 {}] regionserver.HStore(1540): f14d455ff3b60546f0a651dc8cf12d5c/B is initiating minor compaction (all files) 2024-12-16T17:58:46,314 INFO [RS:0;3609ad07831c:39733-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of f14d455ff3b60546f0a651dc8cf12d5c/A in TestAcidGuarantees,,1734371907068.f14d455ff3b60546f0a651dc8cf12d5c. 
2024-12-16T17:58:46,314 INFO [RS:0;3609ad07831c:39733-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of f14d455ff3b60546f0a651dc8cf12d5c/B in TestAcidGuarantees,,1734371907068.f14d455ff3b60546f0a651dc8cf12d5c. 2024-12-16T17:58:46,315 INFO [RS:0;3609ad07831c:39733-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/f14d455ff3b60546f0a651dc8cf12d5c/A/9e39f8f1e1f04627ae5423d1de418265, hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/f14d455ff3b60546f0a651dc8cf12d5c/A/ed164e21d28b40b18d2d9066c95e053e, hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/f14d455ff3b60546f0a651dc8cf12d5c/A/21f3dd0101634b5c83ce3f9b8f08cf01] into tmpdir=hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/f14d455ff3b60546f0a651dc8cf12d5c/.tmp, totalSize=34.4 K 2024-12-16T17:58:46,315 INFO [RS:0;3609ad07831c:39733-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/f14d455ff3b60546f0a651dc8cf12d5c/B/e6b63d52fd164194be39de80cc15fbae, hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/f14d455ff3b60546f0a651dc8cf12d5c/B/740efa9e595348f1bdd3e1d81dd33b26, hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/f14d455ff3b60546f0a651dc8cf12d5c/B/85077b1238da44f5adfc717bfb4b6db8] into tmpdir=hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/f14d455ff3b60546f0a651dc8cf12d5c/.tmp, totalSize=34.4 K 2024-12-16T17:58:46,315 DEBUG [RS:0;3609ad07831c:39733-longCompactions-0 {}] compactions.Compactor(224): Compacting e6b63d52fd164194be39de80cc15fbae, keycount=150, bloomtype=ROW, size=12.8 K, encoding=NONE, compression=NONE, seqNum=332, earliestPutTs=1734371922193 2024-12-16T17:58:46,315 DEBUG [RS:0;3609ad07831c:39733-shortCompactions-0 {}] compactions.Compactor(224): Compacting 9e39f8f1e1f04627ae5423d1de418265, keycount=150, bloomtype=ROW, size=12.8 K, encoding=NONE, compression=NONE, seqNum=332, earliestPutTs=1734371922193 2024-12-16T17:58:46,315 DEBUG [RS:0;3609ad07831c:39733-longCompactions-0 {}] compactions.Compactor(224): Compacting 740efa9e595348f1bdd3e1d81dd33b26, keycount=100, bloomtype=ROW, size=9.6 K, encoding=NONE, compression=NONE, seqNum=341, earliestPutTs=1734371922814 2024-12-16T17:58:46,315 DEBUG [RS:0;3609ad07831c:39733-shortCompactions-0 {}] compactions.Compactor(224): Compacting ed164e21d28b40b18d2d9066c95e053e, keycount=100, bloomtype=ROW, size=9.6 K, encoding=NONE, compression=NONE, seqNum=341, earliestPutTs=1734371922814 2024-12-16T17:58:46,315 DEBUG [RS:0;3609ad07831c:39733-longCompactions-0 {}] compactions.Compactor(224): Compacting 85077b1238da44f5adfc717bfb4b6db8, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=372, earliestPutTs=1734371924919 2024-12-16T17:58:46,315 DEBUG [RS:0;3609ad07831c:39733-shortCompactions-0 {}] compactions.Compactor(224): Compacting 21f3dd0101634b5c83ce3f9b8f08cf01, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=372, earliestPutTs=1734371924919 
2024-12-16T17:58:46,321 INFO [RS:0;3609ad07831c:39733-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): f14d455ff3b60546f0a651dc8cf12d5c#A#compaction#456 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 1 active operations remaining, total limit is 50.00 MB/second 2024-12-16T17:58:46,321 INFO [RS:0;3609ad07831c:39733-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): f14d455ff3b60546f0a651dc8cf12d5c#B#compaction#457 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-12-16T17:58:46,321 DEBUG [RS:0;3609ad07831c:39733-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/f14d455ff3b60546f0a651dc8cf12d5c/.tmp/A/69a187a225104968910d5e39ac9208f5 is 50, key is test_row_0/A:col10/1734371925469/Put/seqid=0 2024-12-16T17:58:46,321 DEBUG [RS:0;3609ad07831c:39733-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/f14d455ff3b60546f0a651dc8cf12d5c/.tmp/B/5fae30e8740a4de2984ab787c1d9aa9c is 50, key is test_row_0/B:col10/1734371925469/Put/seqid=0 2024-12-16T17:58:46,324 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41817 is added to blk_1073742356_1532 (size=13187) 2024-12-16T17:58:46,330 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41817 is added to blk_1073742355_1531 (size=13187) 2024-12-16T17:58:46,605 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39733 {}] regionserver.HRegion(8581): Flush requested on f14d455ff3b60546f0a651dc8cf12d5c 2024-12-16T17:58:46,605 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing f14d455ff3b60546f0a651dc8cf12d5c 3/3 column families, dataSize=53.67 KB heapSize=141.38 KB 2024-12-16T17:58:46,606 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK f14d455ff3b60546f0a651dc8cf12d5c, store=A 2024-12-16T17:58:46,606 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-16T17:58:46,606 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK f14d455ff3b60546f0a651dc8cf12d5c, store=B 2024-12-16T17:58:46,606 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-16T17:58:46,606 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK f14d455ff3b60546f0a651dc8cf12d5c, store=C 2024-12-16T17:58:46,606 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-16T17:58:46,608 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/f14d455ff3b60546f0a651dc8cf12d5c/.tmp/A/91179985ec854f1c8624ff12a4cc22b5 is 50, key is test_row_0/A:col10/1734371926597/Put/seqid=0 2024-12-16T17:58:46,611 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41817 is added to blk_1073742357_1533 (size=14741) 2024-12-16T17:58:46,692 WARN 
[RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39733 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f14d455ff3b60546f0a651dc8cf12d5c, server=3609ad07831c,39733,1734371789085 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-16T17:58:46,692 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39733 {}] ipc.CallRunner(138): callId: 147 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35480 deadline: 1734371986688, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f14d455ff3b60546f0a651dc8cf12d5c, server=3609ad07831c,39733,1734371789085 2024-12-16T17:58:46,728 DEBUG [RS:0;3609ad07831c:39733-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/f14d455ff3b60546f0a651dc8cf12d5c/.tmp/B/5fae30e8740a4de2984ab787c1d9aa9c as hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/f14d455ff3b60546f0a651dc8cf12d5c/B/5fae30e8740a4de2984ab787c1d9aa9c 2024-12-16T17:58:46,732 INFO [RS:0;3609ad07831c:39733-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in f14d455ff3b60546f0a651dc8cf12d5c/B of f14d455ff3b60546f0a651dc8cf12d5c into 5fae30e8740a4de2984ab787c1d9aa9c(size=12.9 K), total size for store is 12.9 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-12-16T17:58:46,732 DEBUG [RS:0;3609ad07831c:39733-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for f14d455ff3b60546f0a651dc8cf12d5c: 2024-12-16T17:58:46,732 INFO [RS:0;3609ad07831c:39733-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1734371907068.f14d455ff3b60546f0a651dc8cf12d5c., storeName=f14d455ff3b60546f0a651dc8cf12d5c/B, priority=13, startTime=1734371926314; duration=0sec 2024-12-16T17:58:46,732 DEBUG [RS:0;3609ad07831c:39733-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-16T17:58:46,732 DEBUG [RS:0;3609ad07831c:39733-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: f14d455ff3b60546f0a651dc8cf12d5c:B 2024-12-16T17:58:46,732 DEBUG [RS:0;3609ad07831c:39733-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-16T17:58:46,733 DEBUG [RS:0;3609ad07831c:39733-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 35243 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-16T17:58:46,733 DEBUG [RS:0;3609ad07831c:39733-longCompactions-0 {}] regionserver.HStore(1540): f14d455ff3b60546f0a651dc8cf12d5c/C is initiating minor compaction (all files) 2024-12-16T17:58:46,733 INFO [RS:0;3609ad07831c:39733-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of f14d455ff3b60546f0a651dc8cf12d5c/C in TestAcidGuarantees,,1734371907068.f14d455ff3b60546f0a651dc8cf12d5c. 2024-12-16T17:58:46,733 INFO [RS:0;3609ad07831c:39733-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/f14d455ff3b60546f0a651dc8cf12d5c/C/f9b065df24cc4a0dbb4b1dab90fa9126, hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/f14d455ff3b60546f0a651dc8cf12d5c/C/d9c1c0b9f2054749b066ec4aefa4f7ca, hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/f14d455ff3b60546f0a651dc8cf12d5c/C/087c8bcfadee46b0b661e9a0138fee74] into tmpdir=hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/f14d455ff3b60546f0a651dc8cf12d5c/.tmp, totalSize=34.4 K 2024-12-16T17:58:46,734 DEBUG [RS:0;3609ad07831c:39733-longCompactions-0 {}] compactions.Compactor(224): Compacting f9b065df24cc4a0dbb4b1dab90fa9126, keycount=150, bloomtype=ROW, size=12.8 K, encoding=NONE, compression=NONE, seqNum=332, earliestPutTs=1734371922193 2024-12-16T17:58:46,734 DEBUG [RS:0;3609ad07831c:39733-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/f14d455ff3b60546f0a651dc8cf12d5c/.tmp/A/69a187a225104968910d5e39ac9208f5 as hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/f14d455ff3b60546f0a651dc8cf12d5c/A/69a187a225104968910d5e39ac9208f5 2024-12-16T17:58:46,734 DEBUG [RS:0;3609ad07831c:39733-longCompactions-0 {}] compactions.Compactor(224): Compacting d9c1c0b9f2054749b066ec4aefa4f7ca, keycount=100, 
bloomtype=ROW, size=9.6 K, encoding=NONE, compression=NONE, seqNum=341, earliestPutTs=1734371922814 2024-12-16T17:58:46,734 DEBUG [RS:0;3609ad07831c:39733-longCompactions-0 {}] compactions.Compactor(224): Compacting 087c8bcfadee46b0b661e9a0138fee74, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=372, earliestPutTs=1734371924919 2024-12-16T17:58:46,737 INFO [RS:0;3609ad07831c:39733-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in f14d455ff3b60546f0a651dc8cf12d5c/A of f14d455ff3b60546f0a651dc8cf12d5c into 69a187a225104968910d5e39ac9208f5(size=12.9 K), total size for store is 12.9 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-12-16T17:58:46,737 DEBUG [RS:0;3609ad07831c:39733-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for f14d455ff3b60546f0a651dc8cf12d5c: 2024-12-16T17:58:46,737 INFO [RS:0;3609ad07831c:39733-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1734371907068.f14d455ff3b60546f0a651dc8cf12d5c., storeName=f14d455ff3b60546f0a651dc8cf12d5c/A, priority=13, startTime=1734371926314; duration=0sec 2024-12-16T17:58:46,737 DEBUG [RS:0;3609ad07831c:39733-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-16T17:58:46,737 DEBUG [RS:0;3609ad07831c:39733-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: f14d455ff3b60546f0a651dc8cf12d5c:A 2024-12-16T17:58:46,739 INFO [RS:0;3609ad07831c:39733-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): f14d455ff3b60546f0a651dc8cf12d5c#C#compaction#459 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-12-16T17:58:46,739 DEBUG [RS:0;3609ad07831c:39733-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/f14d455ff3b60546f0a651dc8cf12d5c/.tmp/C/11724e4741ce4e5b8ca29a69cccc54cf is 50, key is test_row_0/C:col10/1734371925469/Put/seqid=0 2024-12-16T17:58:46,762 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41817 is added to blk_1073742358_1534 (size=13187) 2024-12-16T17:58:46,796 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39733 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f14d455ff3b60546f0a651dc8cf12d5c, server=3609ad07831c,39733,1734371789085 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-16T17:58:46,796 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39733 {}] ipc.CallRunner(138): callId: 149 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35480 deadline: 1734371986793, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f14d455ff3b60546f0a651dc8cf12d5c, server=3609ad07831c,39733,1734371789085 2024-12-16T17:58:46,948 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39733 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f14d455ff3b60546f0a651dc8cf12d5c, server=3609ad07831c,39733,1734371789085 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-16T17:58:46,948 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39733 {}] ipc.CallRunner(138): callId: 162 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35488 deadline: 1734371986945, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f14d455ff3b60546f0a651dc8cf12d5c, server=3609ad07831c,39733,1734371789085 2024-12-16T17:58:46,949 DEBUG [Thread-2021 {}] client.RpcRetryingCallerImpl(129): Call exception, tries=6, retries=16, started=4131 ms ago, cancelled=false, msg=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f14d455ff3b60546f0a651dc8cf12d5c, server=3609ad07831c,39733,1734371789085 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) , details=row 'test_row_1' on table 'TestAcidGuarantees' at region=TestAcidGuarantees,,1734371907068.f14d455ff3b60546f0a651dc8cf12d5c., hostname=3609ad07831c,39733,1734371789085, seqNum=2, see https://s.apache.org/timeout, exception=org.apache.hadoop.hbase.RegionTooBusyException: org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f14d455ff3b60546f0a651dc8cf12d5c, server=3609ad07831c,39733,1734371789085 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at jdk.internal.reflect.GeneratedConstructorAccessor40.newInstance(Unknown Source) at 
java.base/jdk.internal.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45) at java.base/java.lang.reflect.Constructor.newInstanceWithCaller(Constructor.java:499) at java.base/java.lang.reflect.Constructor.newInstance(Constructor.java:480) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.instantiateException(RemoteWithExtrasException.java:110) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.unwrapRemoteException(RemoteWithExtrasException.java:100) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.makeIOExceptionOfException(ProtobufUtil.java:280) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.handleRemoteException(ProtobufUtil.java:265) at org.apache.hadoop.hbase.client.RegionServerCallable.call(RegionServerCallable.java:133) at org.apache.hadoop.hbase.client.RpcRetryingCallerImpl.callWithRetries(RpcRetryingCallerImpl.java:104) at org.apache.hadoop.hbase.client.HTable.lambda$put$3(HTable.java:578) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.client.HTable.put(HTable.java:565) at org.apache.hadoop.hbase.AcidGuaranteesTestTool$AtomicityWriter.doAnAction(AcidGuaranteesTestTool.java:169) at org.apache.hadoop.hbase.MultithreadedTestUtil$RepeatingTestThread.doWork(MultithreadedTestUtil.java:149) at org.apache.hadoop.hbase.MultithreadedTestUtil$TestThread.run(MultithreadedTestUtil.java:123) Caused by: org.apache.hadoop.hbase.ipc.RemoteWithExtrasException(org.apache.hadoop.hbase.RegionTooBusyException): org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f14d455ff3b60546f0a651dc8cf12d5c, server=3609ad07831c,39733,1734371789085 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.onCallFinished(AbstractRpcClient.java:392) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.access$100(AbstractRpcClient.java:94) at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:430) at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:425) at org.apache.hadoop.hbase.ipc.Call.callComplete(Call.java:116) at org.apache.hadoop.hbase.ipc.Call.setException(Call.java:131) at org.apache.hadoop.hbase.ipc.RpcConnection.readResponse(RpcConnection.java:457) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.readResponse(NettyRpcDuplexHandler.java:125) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.channelRead(NettyRpcDuplexHandler.java:140) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at 
org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.fireChannelRead(ByteToMessageDecoder.java:346) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.channelRead(ByteToMessageDecoder.java:318) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:444) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.timeout.IdleStateHandler.channelRead(IdleStateHandler.java:289) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline$HeadContext.channelRead(DefaultChannelPipeline.java:1357) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:440) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline.fireChannelRead(DefaultChannelPipeline.java:868) at org.apache.hbase.thirdparty.io.netty.channel.nio.AbstractNioByteChannel$NioByteUnsafe.read(AbstractNioByteChannel.java:166) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKey(NioEventLoop.java:788) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeysOptimized(NioEventLoop.java:724) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeys(NioEventLoop.java:650) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:562) at org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) at org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) at org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) at java.base/java.lang.Thread.run(Thread.java:840) 2024-12-16T17:58:46,952 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39733 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f14d455ff3b60546f0a651dc8cf12d5c, server=3609ad07831c,39733,1734371789085 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-16T17:58:46,953 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39733 {}] ipc.CallRunner(138): callId: 163 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35424 deadline: 1734371986950, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f14d455ff3b60546f0a651dc8cf12d5c, server=3609ad07831c,39733,1734371789085 2024-12-16T17:58:46,953 DEBUG [Thread-2019 {}] client.RpcRetryingCallerImpl(129): Call exception, tries=6, retries=16, started=4138 ms ago, cancelled=false, msg=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f14d455ff3b60546f0a651dc8cf12d5c, server=3609ad07831c,39733,1734371789085 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) , details=row 'test_row_2' on table 'TestAcidGuarantees' at region=TestAcidGuarantees,,1734371907068.f14d455ff3b60546f0a651dc8cf12d5c., hostname=3609ad07831c,39733,1734371789085, seqNum=2, see https://s.apache.org/timeout, exception=org.apache.hadoop.hbase.RegionTooBusyException: org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f14d455ff3b60546f0a651dc8cf12d5c, server=3609ad07831c,39733,1734371789085 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at 
org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at jdk.internal.reflect.GeneratedConstructorAccessor40.newInstance(Unknown Source) at java.base/jdk.internal.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45) at java.base/java.lang.reflect.Constructor.newInstanceWithCaller(Constructor.java:499) at java.base/java.lang.reflect.Constructor.newInstance(Constructor.java:480) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.instantiateException(RemoteWithExtrasException.java:110) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.unwrapRemoteException(RemoteWithExtrasException.java:100) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.makeIOExceptionOfException(ProtobufUtil.java:280) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.handleRemoteException(ProtobufUtil.java:265) at org.apache.hadoop.hbase.client.RegionServerCallable.call(RegionServerCallable.java:133) at org.apache.hadoop.hbase.client.RpcRetryingCallerImpl.callWithRetries(RpcRetryingCallerImpl.java:104) at org.apache.hadoop.hbase.client.HTable.lambda$put$3(HTable.java:578) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.client.HTable.put(HTable.java:565) at org.apache.hadoop.hbase.AcidGuaranteesTestTool$AtomicityWriter.doAnAction(AcidGuaranteesTestTool.java:169) at org.apache.hadoop.hbase.MultithreadedTestUtil$RepeatingTestThread.doWork(MultithreadedTestUtil.java:149) at org.apache.hadoop.hbase.MultithreadedTestUtil$TestThread.run(MultithreadedTestUtil.java:123) Caused by: org.apache.hadoop.hbase.ipc.RemoteWithExtrasException(org.apache.hadoop.hbase.RegionTooBusyException): org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f14d455ff3b60546f0a651dc8cf12d5c, server=3609ad07831c,39733,1734371789085 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.onCallFinished(AbstractRpcClient.java:392) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.access$100(AbstractRpcClient.java:94) at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:430) at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:425) at org.apache.hadoop.hbase.ipc.Call.callComplete(Call.java:116) at org.apache.hadoop.hbase.ipc.Call.setException(Call.java:131) at 
org.apache.hadoop.hbase.ipc.RpcConnection.readResponse(RpcConnection.java:457) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.readResponse(NettyRpcDuplexHandler.java:125) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.channelRead(NettyRpcDuplexHandler.java:140) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.fireChannelRead(ByteToMessageDecoder.java:346) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.channelRead(ByteToMessageDecoder.java:318) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:444) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.timeout.IdleStateHandler.channelRead(IdleStateHandler.java:289) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline$HeadContext.channelRead(DefaultChannelPipeline.java:1357) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:440) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline.fireChannelRead(DefaultChannelPipeline.java:868) at org.apache.hbase.thirdparty.io.netty.channel.nio.AbstractNioByteChannel$NioByteUnsafe.read(AbstractNioByteChannel.java:166) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKey(NioEventLoop.java:788) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeysOptimized(NioEventLoop.java:724) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeys(NioEventLoop.java:650) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:562) at org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) at org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) at org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) at java.base/java.lang.Thread.run(Thread.java:840) 2024-12-16T17:58:46,962 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39733 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f14d455ff3b60546f0a651dc8cf12d5c, server=3609ad07831c,39733,1734371789085 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-16T17:58:46,963 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39733 {}] ipc.CallRunner(138): callId: 158 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35486 deadline: 1734371986960, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f14d455ff3b60546f0a651dc8cf12d5c, server=3609ad07831c,39733,1734371789085 2024-12-16T17:58:46,963 DEBUG [Thread-2015 {}] client.RpcRetryingCallerImpl(129): Call exception, tries=6, retries=16, started=4148 ms ago, cancelled=false, msg=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f14d455ff3b60546f0a651dc8cf12d5c, server=3609ad07831c,39733,1734371789085 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) , details=row 'test_row_0' on table 'TestAcidGuarantees' at region=TestAcidGuarantees,,1734371907068.f14d455ff3b60546f0a651dc8cf12d5c., hostname=3609ad07831c,39733,1734371789085, seqNum=2, see https://s.apache.org/timeout, exception=org.apache.hadoop.hbase.RegionTooBusyException: org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f14d455ff3b60546f0a651dc8cf12d5c, server=3609ad07831c,39733,1734371789085 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at 
org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at jdk.internal.reflect.GeneratedConstructorAccessor40.newInstance(Unknown Source) at java.base/jdk.internal.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45) at java.base/java.lang.reflect.Constructor.newInstanceWithCaller(Constructor.java:499) at java.base/java.lang.reflect.Constructor.newInstance(Constructor.java:480) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.instantiateException(RemoteWithExtrasException.java:110) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.unwrapRemoteException(RemoteWithExtrasException.java:100) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.makeIOExceptionOfException(ProtobufUtil.java:280) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.handleRemoteException(ProtobufUtil.java:265) at org.apache.hadoop.hbase.client.RegionServerCallable.call(RegionServerCallable.java:133) at org.apache.hadoop.hbase.client.RpcRetryingCallerImpl.callWithRetries(RpcRetryingCallerImpl.java:104) at org.apache.hadoop.hbase.client.HTable.lambda$put$3(HTable.java:578) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.client.HTable.put(HTable.java:565) at org.apache.hadoop.hbase.AcidGuaranteesTestTool$AtomicityWriter.doAnAction(AcidGuaranteesTestTool.java:169) at org.apache.hadoop.hbase.MultithreadedTestUtil$RepeatingTestThread.doWork(MultithreadedTestUtil.java:149) at org.apache.hadoop.hbase.MultithreadedTestUtil$TestThread.run(MultithreadedTestUtil.java:123) Caused by: org.apache.hadoop.hbase.ipc.RemoteWithExtrasException(org.apache.hadoop.hbase.RegionTooBusyException): org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f14d455ff3b60546f0a651dc8cf12d5c, server=3609ad07831c,39733,1734371789085 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.onCallFinished(AbstractRpcClient.java:392) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.access$100(AbstractRpcClient.java:94) at 
org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:430) at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:425) at org.apache.hadoop.hbase.ipc.Call.callComplete(Call.java:116) at org.apache.hadoop.hbase.ipc.Call.setException(Call.java:131) at org.apache.hadoop.hbase.ipc.RpcConnection.readResponse(RpcConnection.java:457) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.readResponse(NettyRpcDuplexHandler.java:125) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.channelRead(NettyRpcDuplexHandler.java:140) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.fireChannelRead(ByteToMessageDecoder.java:346) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.channelRead(ByteToMessageDecoder.java:318) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:444) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.timeout.IdleStateHandler.channelRead(IdleStateHandler.java:289) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline$HeadContext.channelRead(DefaultChannelPipeline.java:1357) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:440) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline.fireChannelRead(DefaultChannelPipeline.java:868) at org.apache.hbase.thirdparty.io.netty.channel.nio.AbstractNioByteChannel$NioByteUnsafe.read(AbstractNioByteChannel.java:166) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKey(NioEventLoop.java:788) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeysOptimized(NioEventLoop.java:724) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeys(NioEventLoop.java:650) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:562) at org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) at org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) at 
org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) at java.base/java.lang.Thread.run(Thread.java:840) 2024-12-16T17:58:47,001 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39733 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f14d455ff3b60546f0a651dc8cf12d5c, server=3609ad07831c,39733,1734371789085 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-16T17:58:47,001 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39733 {}] ipc.CallRunner(138): callId: 151 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35480 deadline: 1734371986998, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f14d455ff3b60546f0a651dc8cf12d5c, server=3609ad07831c,39733,1734371789085 2024-12-16T17:58:47,012 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=17.89 KB at sequenceid=383 (bloomFilter=true), to=hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/f14d455ff3b60546f0a651dc8cf12d5c/.tmp/A/91179985ec854f1c8624ff12a4cc22b5 2024-12-16T17:58:47,018 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/f14d455ff3b60546f0a651dc8cf12d5c/.tmp/B/c268e10bf98d436dbecc3ab64df68b23 is 50, key is test_row_0/B:col10/1734371926597/Put/seqid=0 2024-12-16T17:58:47,021 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41817 is added to blk_1073742359_1535 (size=12301) 2024-12-16T17:58:47,056 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39733 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f14d455ff3b60546f0a651dc8cf12d5c, server=3609ad07831c,39733,1734371789085 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-16T17:58:47,056 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39733 {}] ipc.CallRunner(138): callId: 171 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35482 deadline: 1734371987051, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f14d455ff3b60546f0a651dc8cf12d5c, server=3609ad07831c,39733,1734371789085 2024-12-16T17:58:47,166 DEBUG [RS:0;3609ad07831c:39733-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/f14d455ff3b60546f0a651dc8cf12d5c/.tmp/C/11724e4741ce4e5b8ca29a69cccc54cf as hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/f14d455ff3b60546f0a651dc8cf12d5c/C/11724e4741ce4e5b8ca29a69cccc54cf 2024-12-16T17:58:47,169 INFO [RS:0;3609ad07831c:39733-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in f14d455ff3b60546f0a651dc8cf12d5c/C of f14d455ff3b60546f0a651dc8cf12d5c into 11724e4741ce4e5b8ca29a69cccc54cf(size=12.9 K), total size for store is 12.9 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-12-16T17:58:47,169 DEBUG [RS:0;3609ad07831c:39733-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for f14d455ff3b60546f0a651dc8cf12d5c: 2024-12-16T17:58:47,169 INFO [RS:0;3609ad07831c:39733-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1734371907068.f14d455ff3b60546f0a651dc8cf12d5c., storeName=f14d455ff3b60546f0a651dc8cf12d5c/C, priority=13, startTime=1734371926314; duration=0sec 2024-12-16T17:58:47,169 DEBUG [RS:0;3609ad07831c:39733-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-16T17:58:47,169 DEBUG [RS:0;3609ad07831c:39733-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: f14d455ff3b60546f0a651dc8cf12d5c:C 2024-12-16T17:58:47,189 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38367 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=132 2024-12-16T17:58:47,189 INFO [Thread-2023 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 132 completed 2024-12-16T17:58:47,190 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38367 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-12-16T17:58:47,191 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38367 {}] procedure2.ProcedureExecutor(1098): Stored pid=134, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=134, table=TestAcidGuarantees 2024-12-16T17:58:47,191 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38367 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=134 2024-12-16T17:58:47,191 INFO [PEWorker-2 {}] procedure.FlushTableProcedure(91): pid=134, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=134, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-12-16T17:58:47,191 INFO [PEWorker-2 {}] procedure.FlushTableProcedure(91): pid=134, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=134, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-12-16T17:58:47,192 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=135, ppid=134, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-12-16T17:58:47,292 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38367 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=134 2024-12-16T17:58:47,304 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39733 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f14d455ff3b60546f0a651dc8cf12d5c, server=3609ad07831c,39733,1734371789085 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-16T17:58:47,304 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39733 {}] ipc.CallRunner(138): callId: 153 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35480 deadline: 1734371987302, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f14d455ff3b60546f0a651dc8cf12d5c, server=3609ad07831c,39733,1734371789085 2024-12-16T17:58:47,342 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 3609ad07831c,39733,1734371789085 2024-12-16T17:58:47,343 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=39733 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=135 2024-12-16T17:58:47,343 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-1 {event_type=RS_FLUSH_REGIONS, pid=135}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1734371907068.f14d455ff3b60546f0a651dc8cf12d5c. 2024-12-16T17:58:47,343 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-1 {event_type=RS_FLUSH_REGIONS, pid=135}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1734371907068.f14d455ff3b60546f0a651dc8cf12d5c. as already flushing 2024-12-16T17:58:47,343 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-1 {event_type=RS_FLUSH_REGIONS, pid=135}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1734371907068.f14d455ff3b60546f0a651dc8cf12d5c. 2024-12-16T17:58:47,343 ERROR [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-1 {event_type=RS_FLUSH_REGIONS, pid=135}] handler.RSProcedureHandler(58): pid=135 java.io.IOException: Unable to complete flush {ENCODED => f14d455ff3b60546f0a651dc8cf12d5c, NAME => 'TestAcidGuarantees,,1734371907068.f14d455ff3b60546f0a651dc8cf12d5c.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-16T17:58:47,343 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-1 {event_type=RS_FLUSH_REGIONS, pid=135}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=135 java.io.IOException: Unable to complete flush {ENCODED => f14d455ff3b60546f0a651dc8cf12d5c, NAME => 'TestAcidGuarantees,,1734371907068.f14d455ff3b60546f0a651dc8cf12d5c.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-16T17:58:47,344 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38367 {}] master.HMaster(4114): Remote procedure failed, pid=135 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => f14d455ff3b60546f0a651dc8cf12d5c, NAME => 'TestAcidGuarantees,,1734371907068.f14d455ff3b60546f0a651dc8cf12d5c.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => f14d455ff3b60546f0a651dc8cf12d5c, NAME => 'TestAcidGuarantees,,1734371907068.f14d455ff3b60546f0a651dc8cf12d5c.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-16T17:58:47,421 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=17.89 KB at sequenceid=383 (bloomFilter=true), to=hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/f14d455ff3b60546f0a651dc8cf12d5c/.tmp/B/c268e10bf98d436dbecc3ab64df68b23 2024-12-16T17:58:47,427 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/f14d455ff3b60546f0a651dc8cf12d5c/.tmp/C/6aafd4b922c5430ca2542d822d2e58b2 is 50, key is test_row_0/C:col10/1734371926597/Put/seqid=0 2024-12-16T17:58:47,430 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41817 is added to blk_1073742360_1536 (size=12301) 2024-12-16T17:58:47,492 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38367 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=134 2024-12-16T17:58:47,495 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 3609ad07831c,39733,1734371789085 2024-12-16T17:58:47,495 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=39733 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=135 2024-12-16T17:58:47,495 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-2 {event_type=RS_FLUSH_REGIONS, pid=135}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1734371907068.f14d455ff3b60546f0a651dc8cf12d5c. 2024-12-16T17:58:47,495 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-2 {event_type=RS_FLUSH_REGIONS, pid=135}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1734371907068.f14d455ff3b60546f0a651dc8cf12d5c. as already flushing 2024-12-16T17:58:47,495 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-2 {event_type=RS_FLUSH_REGIONS, pid=135}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1734371907068.f14d455ff3b60546f0a651dc8cf12d5c. 2024-12-16T17:58:47,495 ERROR [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-2 {event_type=RS_FLUSH_REGIONS, pid=135}] handler.RSProcedureHandler(58): pid=135 java.io.IOException: Unable to complete flush {ENCODED => f14d455ff3b60546f0a651dc8cf12d5c, NAME => 'TestAcidGuarantees,,1734371907068.f14d455ff3b60546f0a651dc8cf12d5c.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] 
at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-16T17:58:47,496 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-2 {event_type=RS_FLUSH_REGIONS, pid=135}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=135 java.io.IOException: Unable to complete flush {ENCODED => f14d455ff3b60546f0a651dc8cf12d5c, NAME => 'TestAcidGuarantees,,1734371907068.f14d455ff3b60546f0a651dc8cf12d5c.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-16T17:58:47,496 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38367 {}] master.HMaster(4114): Remote procedure failed, pid=135 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => f14d455ff3b60546f0a651dc8cf12d5c, NAME => 'TestAcidGuarantees,,1734371907068.f14d455ff3b60546f0a651dc8cf12d5c.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => f14d455ff3b60546f0a651dc8cf12d5c, NAME => 'TestAcidGuarantees,,1734371907068.f14d455ff3b60546f0a651dc8cf12d5c.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] 
at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-16T17:58:47,647 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 3609ad07831c,39733,1734371789085 2024-12-16T17:58:47,647 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=39733 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=135 2024-12-16T17:58:47,648 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-0 {event_type=RS_FLUSH_REGIONS, pid=135}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1734371907068.f14d455ff3b60546f0a651dc8cf12d5c. 2024-12-16T17:58:47,648 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-0 {event_type=RS_FLUSH_REGIONS, pid=135}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1734371907068.f14d455ff3b60546f0a651dc8cf12d5c. as already flushing 2024-12-16T17:58:47,648 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-0 {event_type=RS_FLUSH_REGIONS, pid=135}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1734371907068.f14d455ff3b60546f0a651dc8cf12d5c. 2024-12-16T17:58:47,648 ERROR [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-0 {event_type=RS_FLUSH_REGIONS, pid=135}] handler.RSProcedureHandler(58): pid=135 java.io.IOException: Unable to complete flush {ENCODED => f14d455ff3b60546f0a651dc8cf12d5c, NAME => 'TestAcidGuarantees,,1734371907068.f14d455ff3b60546f0a651dc8cf12d5c.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-16T17:58:47,648 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-0 {event_type=RS_FLUSH_REGIONS, pid=135}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=135 java.io.IOException: Unable to complete flush {ENCODED => f14d455ff3b60546f0a651dc8cf12d5c, NAME => 'TestAcidGuarantees,,1734371907068.f14d455ff3b60546f0a651dc8cf12d5c.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] 
at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-16T17:58:47,648 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38367 {}] master.HMaster(4114): Remote procedure failed, pid=135 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => f14d455ff3b60546f0a651dc8cf12d5c, NAME => 'TestAcidGuarantees,,1734371907068.f14d455ff3b60546f0a651dc8cf12d5c.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => f14d455ff3b60546f0a651dc8cf12d5c, NAME => 'TestAcidGuarantees,,1734371907068.f14d455ff3b60546f0a651dc8cf12d5c.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-16T17:58:47,793 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38367 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=134 2024-12-16T17:58:47,799 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 3609ad07831c,39733,1734371789085 2024-12-16T17:58:47,800 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=39733 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=135 2024-12-16T17:58:47,800 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-1 {event_type=RS_FLUSH_REGIONS, pid=135}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1734371907068.f14d455ff3b60546f0a651dc8cf12d5c. 2024-12-16T17:58:47,800 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-1 {event_type=RS_FLUSH_REGIONS, pid=135}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1734371907068.f14d455ff3b60546f0a651dc8cf12d5c. as already flushing 2024-12-16T17:58:47,800 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-1 {event_type=RS_FLUSH_REGIONS, pid=135}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1734371907068.f14d455ff3b60546f0a651dc8cf12d5c. 2024-12-16T17:58:47,800 ERROR [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-1 {event_type=RS_FLUSH_REGIONS, pid=135}] handler.RSProcedureHandler(58): pid=135 java.io.IOException: Unable to complete flush {ENCODED => f14d455ff3b60546f0a651dc8cf12d5c, NAME => 'TestAcidGuarantees,,1734371907068.f14d455ff3b60546f0a651dc8cf12d5c.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-16T17:58:47,800 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-1 {event_type=RS_FLUSH_REGIONS, pid=135}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=135 java.io.IOException: Unable to complete flush {ENCODED => f14d455ff3b60546f0a651dc8cf12d5c, NAME => 'TestAcidGuarantees,,1734371907068.f14d455ff3b60546f0a651dc8cf12d5c.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] 
at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-16T17:58:47,801 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38367 {}] master.HMaster(4114): Remote procedure failed, pid=135 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => f14d455ff3b60546f0a651dc8cf12d5c, NAME => 'TestAcidGuarantees,,1734371907068.f14d455ff3b60546f0a651dc8cf12d5c.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => f14d455ff3b60546f0a651dc8cf12d5c, NAME => 'TestAcidGuarantees,,1734371907068.f14d455ff3b60546f0a651dc8cf12d5c.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-16T17:58:47,808 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39733 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f14d455ff3b60546f0a651dc8cf12d5c, server=3609ad07831c,39733,1734371789085 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-16T17:58:47,808 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39733 {}] ipc.CallRunner(138): callId: 155 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35480 deadline: 1734371987806, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f14d455ff3b60546f0a651dc8cf12d5c, server=3609ad07831c,39733,1734371789085 2024-12-16T17:58:47,831 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=17.89 KB at sequenceid=383 (bloomFilter=true), to=hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/f14d455ff3b60546f0a651dc8cf12d5c/.tmp/C/6aafd4b922c5430ca2542d822d2e58b2 2024-12-16T17:58:47,834 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/f14d455ff3b60546f0a651dc8cf12d5c/.tmp/A/91179985ec854f1c8624ff12a4cc22b5 as hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/f14d455ff3b60546f0a651dc8cf12d5c/A/91179985ec854f1c8624ff12a4cc22b5 2024-12-16T17:58:47,837 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/f14d455ff3b60546f0a651dc8cf12d5c/A/91179985ec854f1c8624ff12a4cc22b5, entries=200, sequenceid=383, filesize=14.4 K 2024-12-16T17:58:47,837 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/f14d455ff3b60546f0a651dc8cf12d5c/.tmp/B/c268e10bf98d436dbecc3ab64df68b23 as hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/f14d455ff3b60546f0a651dc8cf12d5c/B/c268e10bf98d436dbecc3ab64df68b23 2024-12-16T17:58:47,840 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/f14d455ff3b60546f0a651dc8cf12d5c/B/c268e10bf98d436dbecc3ab64df68b23, entries=150, sequenceid=383, filesize=12.0 K 2024-12-16T17:58:47,840 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/f14d455ff3b60546f0a651dc8cf12d5c/.tmp/C/6aafd4b922c5430ca2542d822d2e58b2 as hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/f14d455ff3b60546f0a651dc8cf12d5c/C/6aafd4b922c5430ca2542d822d2e58b2 2024-12-16T17:58:47,843 INFO [MemStoreFlusher.0 
{}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/f14d455ff3b60546f0a651dc8cf12d5c/C/6aafd4b922c5430ca2542d822d2e58b2, entries=150, sequenceid=383, filesize=12.0 K 2024-12-16T17:58:47,843 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~53.67 KB/54960, heapSize ~141.33 KB/144720, currentSize=147.60 KB/151140 for f14d455ff3b60546f0a651dc8cf12d5c in 1238ms, sequenceid=383, compaction requested=false 2024-12-16T17:58:47,843 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for f14d455ff3b60546f0a651dc8cf12d5c: 2024-12-16T17:58:47,952 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 3609ad07831c,39733,1734371789085 2024-12-16T17:58:47,952 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=39733 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=135 2024-12-16T17:58:47,952 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-2 {event_type=RS_FLUSH_REGIONS, pid=135}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1734371907068.f14d455ff3b60546f0a651dc8cf12d5c. 2024-12-16T17:58:47,952 INFO [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-2 {event_type=RS_FLUSH_REGIONS, pid=135}] regionserver.HRegion(2837): Flushing f14d455ff3b60546f0a651dc8cf12d5c 3/3 column families, dataSize=147.60 KB heapSize=387.47 KB 2024-12-16T17:58:47,953 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-2 {event_type=RS_FLUSH_REGIONS, pid=135}] regionserver.CompactingMemStore(205): FLUSHING TO DISK f14d455ff3b60546f0a651dc8cf12d5c, store=A 2024-12-16T17:58:47,953 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-2 {event_type=RS_FLUSH_REGIONS, pid=135}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-16T17:58:47,953 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-2 {event_type=RS_FLUSH_REGIONS, pid=135}] regionserver.CompactingMemStore(205): FLUSHING TO DISK f14d455ff3b60546f0a651dc8cf12d5c, store=B 2024-12-16T17:58:47,953 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-2 {event_type=RS_FLUSH_REGIONS, pid=135}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-16T17:58:47,953 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-2 {event_type=RS_FLUSH_REGIONS, pid=135}] regionserver.CompactingMemStore(205): FLUSHING TO DISK f14d455ff3b60546f0a651dc8cf12d5c, store=C 2024-12-16T17:58:47,953 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-2 {event_type=RS_FLUSH_REGIONS, pid=135}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-16T17:58:47,956 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-2 {event_type=RS_FLUSH_REGIONS, pid=135}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/f14d455ff3b60546f0a651dc8cf12d5c/.tmp/A/36267f0f50744dfb8ebb94fda220b4a9 is 50, key is test_row_0/A:col10/1734371926683/Put/seqid=0 2024-12-16T17:58:47,959 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41817 is added to blk_1073742361_1537 (size=12301) 2024-12-16T17:58:48,293 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38367 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=134 2024-12-16T17:58:48,360 INFO [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-2 {event_type=RS_FLUSH_REGIONS, pid=135}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=49.20 KB at sequenceid=411 (bloomFilter=true), to=hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/f14d455ff3b60546f0a651dc8cf12d5c/.tmp/A/36267f0f50744dfb8ebb94fda220b4a9 2024-12-16T17:58:48,366 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-2 {event_type=RS_FLUSH_REGIONS, pid=135}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/f14d455ff3b60546f0a651dc8cf12d5c/.tmp/B/384560b454e24477a19c900241b7f51b is 50, key is test_row_0/B:col10/1734371926683/Put/seqid=0 2024-12-16T17:58:48,369 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41817 is added to blk_1073742362_1538 (size=12301) 2024-12-16T17:58:48,770 INFO [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-2 {event_type=RS_FLUSH_REGIONS, pid=135}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=49.20 KB at sequenceid=411 (bloomFilter=true), to=hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/f14d455ff3b60546f0a651dc8cf12d5c/.tmp/B/384560b454e24477a19c900241b7f51b 2024-12-16T17:58:48,777 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-2 {event_type=RS_FLUSH_REGIONS, pid=135}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/f14d455ff3b60546f0a651dc8cf12d5c/.tmp/C/730a9ba4e35743ae928c770538f093ab is 50, key is test_row_0/C:col10/1734371926683/Put/seqid=0 2024-12-16T17:58:48,785 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41817 is added to blk_1073742363_1539 (size=12301) 2024-12-16T17:58:48,812 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1734371907068.f14d455ff3b60546f0a651dc8cf12d5c. as already flushing 2024-12-16T17:58:48,812 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39733 {}] regionserver.HRegion(8581): Flush requested on f14d455ff3b60546f0a651dc8cf12d5c 2024-12-16T17:58:48,840 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39733 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f14d455ff3b60546f0a651dc8cf12d5c, server=3609ad07831c,39733,1734371789085 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-16T17:58:48,840 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39733 {}] ipc.CallRunner(138): callId: 165 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35480 deadline: 1734371988836, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f14d455ff3b60546f0a651dc8cf12d5c, server=3609ad07831c,39733,1734371789085 2024-12-16T17:58:48,943 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39733 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f14d455ff3b60546f0a651dc8cf12d5c, server=3609ad07831c,39733,1734371789085 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-16T17:58:48,943 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39733 {}] ipc.CallRunner(138): callId: 167 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35480 deadline: 1734371988941, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f14d455ff3b60546f0a651dc8cf12d5c, server=3609ad07831c,39733,1734371789085 2024-12-16T17:58:49,061 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39733 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f14d455ff3b60546f0a651dc8cf12d5c, server=3609ad07831c,39733,1734371789085 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-16T17:58:49,061 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39733 {}] ipc.CallRunner(138): callId: 173 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35482 deadline: 1734371989057, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f14d455ff3b60546f0a651dc8cf12d5c, server=3609ad07831c,39733,1734371789085 2024-12-16T17:58:49,061 DEBUG [Thread-2017 {}] client.RpcRetryingCallerImpl(129): Call exception, tries=6, retries=16, started=4135 ms ago, cancelled=false, msg=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f14d455ff3b60546f0a651dc8cf12d5c, server=3609ad07831c,39733,1734371789085 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) , details=row 'test_row_2' on table 'TestAcidGuarantees' at region=TestAcidGuarantees,,1734371907068.f14d455ff3b60546f0a651dc8cf12d5c., hostname=3609ad07831c,39733,1734371789085, seqNum=2, see https://s.apache.org/timeout, exception=org.apache.hadoop.hbase.RegionTooBusyException: org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f14d455ff3b60546f0a651dc8cf12d5c, server=3609ad07831c,39733,1734371789085 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at 
org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506)
at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443)
at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124)
at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105)
at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85)
at jdk.internal.reflect.GeneratedConstructorAccessor40.newInstance(Unknown Source)
at java.base/jdk.internal.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45)
at java.base/java.lang.reflect.Constructor.newInstanceWithCaller(Constructor.java:499)
at java.base/java.lang.reflect.Constructor.newInstance(Constructor.java:480)
at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.instantiateException(RemoteWithExtrasException.java:110)
at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.unwrapRemoteException(RemoteWithExtrasException.java:100)
at org.apache.hadoop.hbase.protobuf.ProtobufUtil.makeIOExceptionOfException(ProtobufUtil.java:280)
at org.apache.hadoop.hbase.protobuf.ProtobufUtil.handleRemoteException(ProtobufUtil.java:265)
at org.apache.hadoop.hbase.client.RegionServerCallable.call(RegionServerCallable.java:133)
at org.apache.hadoop.hbase.client.RpcRetryingCallerImpl.callWithRetries(RpcRetryingCallerImpl.java:104)
at org.apache.hadoop.hbase.client.HTable.lambda$put$3(HTable.java:578)
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187)
at org.apache.hadoop.hbase.client.HTable.put(HTable.java:565)
at org.apache.hadoop.hbase.AcidGuaranteesTestTool$AtomicityWriter.doAnAction(AcidGuaranteesTestTool.java:169)
at org.apache.hadoop.hbase.MultithreadedTestUtil$RepeatingTestThread.doWork(MultithreadedTestUtil.java:149)
at org.apache.hadoop.hbase.MultithreadedTestUtil$TestThread.run(MultithreadedTestUtil.java:123)
Caused by: org.apache.hadoop.hbase.ipc.RemoteWithExtrasException(org.apache.hadoop.hbase.RegionTooBusyException): org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f14d455ff3b60546f0a651dc8cf12d5c, server=3609ad07831c,39733,1734371789085
at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067)
at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229)
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216)
at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222)
at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072)
at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035)
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506)
at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443)
at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124)
at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105)
at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85)
at org.apache.hadoop.hbase.ipc.AbstractRpcClient.onCallFinished(AbstractRpcClient.java:392)
at org.apache.hadoop.hbase.ipc.AbstractRpcClient.access$100(AbstractRpcClient.java:94)
at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:430)
at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:425)
at org.apache.hadoop.hbase.ipc.Call.callComplete(Call.java:116)
at org.apache.hadoop.hbase.ipc.Call.setException(Call.java:131)
at org.apache.hadoop.hbase.ipc.RpcConnection.readResponse(RpcConnection.java:457)
at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.readResponse(NettyRpcDuplexHandler.java:125)
at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.channelRead(NettyRpcDuplexHandler.java:140)
at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442)
at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420)
at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412)
at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.fireChannelRead(ByteToMessageDecoder.java:346)
at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.channelRead(ByteToMessageDecoder.java:318)
at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:444)
at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420)
at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412)
at org.apache.hbase.thirdparty.io.netty.handler.timeout.IdleStateHandler.channelRead(IdleStateHandler.java:289)
at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442)
at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420)
at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412)
at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline$HeadContext.channelRead(DefaultChannelPipeline.java:1357)
at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:440)
at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420)
at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline.fireChannelRead(DefaultChannelPipeline.java:868)
at org.apache.hbase.thirdparty.io.netty.channel.nio.AbstractNioByteChannel$NioByteUnsafe.read(AbstractNioByteChannel.java:166)
at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKey(NioEventLoop.java:788)
at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeysOptimized(NioEventLoop.java:724)
at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeys(NioEventLoop.java:650)
at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:562)
at org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997)
at org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74)
at org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30)
at java.base/java.lang.Thread.run(Thread.java:840)
2024-12-16T17:58:49,147 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39733 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit.
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f14d455ff3b60546f0a651dc8cf12d5c, server=3609ad07831c,39733,1734371789085
at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?]
at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?]
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?]
at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?]
at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?]
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT]
at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT]
at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT]
at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT]
2024-12-16T17:58:49,147 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39733 {}] ipc.CallRunner(138): callId: 169 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35480 deadline: 1734371989145, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f14d455ff3b60546f0a651dc8cf12d5c, server=3609ad07831c,39733,1734371789085
2024-12-16T17:58:49,185 INFO [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-2 {event_type=RS_FLUSH_REGIONS, pid=135}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=49.20 KB at sequenceid=411 (bloomFilter=true), to=hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/f14d455ff3b60546f0a651dc8cf12d5c/.tmp/C/730a9ba4e35743ae928c770538f093ab
2024-12-16T17:58:49,189 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-2 {event_type=RS_FLUSH_REGIONS, pid=135}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/f14d455ff3b60546f0a651dc8cf12d5c/.tmp/A/36267f0f50744dfb8ebb94fda220b4a9 as hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/f14d455ff3b60546f0a651dc8cf12d5c/A/36267f0f50744dfb8ebb94fda220b4a9
2024-12-16T17:58:49,191 INFO [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-2 {event_type=RS_FLUSH_REGIONS, pid=135}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/f14d455ff3b60546f0a651dc8cf12d5c/A/36267f0f50744dfb8ebb94fda220b4a9, entries=150, sequenceid=411, filesize=12.0 K
2024-12-16T17:58:49,192 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-2 {event_type=RS_FLUSH_REGIONS, pid=135}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/f14d455ff3b60546f0a651dc8cf12d5c/.tmp/B/384560b454e24477a19c900241b7f51b as
hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/f14d455ff3b60546f0a651dc8cf12d5c/B/384560b454e24477a19c900241b7f51b 2024-12-16T17:58:49,194 INFO [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-2 {event_type=RS_FLUSH_REGIONS, pid=135}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/f14d455ff3b60546f0a651dc8cf12d5c/B/384560b454e24477a19c900241b7f51b, entries=150, sequenceid=411, filesize=12.0 K 2024-12-16T17:58:49,195 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-2 {event_type=RS_FLUSH_REGIONS, pid=135}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/f14d455ff3b60546f0a651dc8cf12d5c/.tmp/C/730a9ba4e35743ae928c770538f093ab as hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/f14d455ff3b60546f0a651dc8cf12d5c/C/730a9ba4e35743ae928c770538f093ab 2024-12-16T17:58:49,197 INFO [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-2 {event_type=RS_FLUSH_REGIONS, pid=135}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/f14d455ff3b60546f0a651dc8cf12d5c/C/730a9ba4e35743ae928c770538f093ab, entries=150, sequenceid=411, filesize=12.0 K 2024-12-16T17:58:49,198 INFO [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-2 {event_type=RS_FLUSH_REGIONS, pid=135}] regionserver.HRegion(3040): Finished flush of dataSize ~147.60 KB/151140, heapSize ~387.42 KB/396720, currentSize=53.67 KB/54960 for f14d455ff3b60546f0a651dc8cf12d5c in 1246ms, sequenceid=411, compaction requested=true 2024-12-16T17:58:49,198 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-2 {event_type=RS_FLUSH_REGIONS, pid=135}] regionserver.HRegion(2538): Flush status journal for f14d455ff3b60546f0a651dc8cf12d5c: 2024-12-16T17:58:49,198 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-2 {event_type=RS_FLUSH_REGIONS, pid=135}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1734371907068.f14d455ff3b60546f0a651dc8cf12d5c. 
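The trace above shows one of the test tool's writer threads getting a RegionTooBusyException back from HTable.put once the region's memstore crosses its blocking limit (512.0 K in this run): the server rejects the mutation in HRegion.checkResources, the exception travels back through the RPC layer, and the client's RpcRetryingCallerImpl surfaces it after unwrapping. Below is a minimal client-side sketch of the same write path, assuming the standard HBase 2.x Java client; the table, row, family and qualifier come from the log, while the value, retry count and backoff are illustrative, and the manual retry loop only mirrors what the client's retrying caller already does internally.

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.RegionTooBusyException;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;
    import org.apache.hadoop.hbase.client.Put;
    import org.apache.hadoop.hbase.client.Table;
    import org.apache.hadoop.hbase.util.Bytes;

    public class BusyRegionPutExample {
      public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();
        try (Connection conn = ConnectionFactory.createConnection(conf);
             Table table = conn.getTable(TableName.valueOf("TestAcidGuarantees"))) {
          // Row, family and qualifier match the test's keys (test_row_0, A:col10).
          Put put = new Put(Bytes.toBytes("test_row_0"));
          put.addColumn(Bytes.toBytes("A"), Bytes.toBytes("col10"), Bytes.toBytes("some-value"));

          long backoffMs = 100;                       // illustrative backoff
          for (int attempt = 1; ; attempt++) {
            try {
              table.put(put);                         // the same HTable.put call seen in the trace
              break;                                  // write accepted
            } catch (RegionTooBusyException e) {
              // The region is over its blocking memstore size; back off and retry.
              if (attempt >= 5) {
                throw e;                              // give up after a few attempts
              }
              Thread.sleep(backoffMs);
              backoffMs *= 2;
            }
          }
        }
      }
    }

On the server side the blocking threshold comes from hbase.hregion.memstore.flush.size multiplied by hbase.hregion.memstore.block.multiplier; the unusually small 512.0 K limit here reflects the test's configuration rather than a production default.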
2024-12-16T17:58:49,198 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-2 {event_type=RS_FLUSH_REGIONS, pid=135}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=135 2024-12-16T17:58:49,198 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38367 {}] master.HMaster(4106): Remote procedure done, pid=135 2024-12-16T17:58:49,200 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=135, resume processing ppid=134 2024-12-16T17:58:49,200 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=135, ppid=134, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 2.0080 sec 2024-12-16T17:58:49,201 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=134, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=134, table=TestAcidGuarantees in 2.0100 sec 2024-12-16T17:58:49,294 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38367 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=134 2024-12-16T17:58:49,294 INFO [Thread-2023 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 134 completed 2024-12-16T17:58:49,295 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38367 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-12-16T17:58:49,296 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38367 {}] procedure2.ProcedureExecutor(1098): Stored pid=136, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=136, table=TestAcidGuarantees 2024-12-16T17:58:49,296 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38367 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=136 2024-12-16T17:58:49,296 INFO [PEWorker-3 {}] procedure.FlushTableProcedure(91): pid=136, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=136, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-12-16T17:58:49,297 INFO [PEWorker-3 {}] procedure.FlushTableProcedure(91): pid=136, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=136, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-12-16T17:58:49,297 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=137, ppid=136, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-12-16T17:58:49,354 DEBUG [Thread-2028 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x22a055db to 127.0.0.1:49190 2024-12-16T17:58:49,354 DEBUG [Thread-2028 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-16T17:58:49,354 DEBUG [Thread-2024 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x0f179203 to 127.0.0.1:49190 2024-12-16T17:58:49,354 DEBUG [Thread-2024 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-16T17:58:49,355 DEBUG [Thread-2032 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x59494c51 to 127.0.0.1:49190 2024-12-16T17:58:49,355 DEBUG [Thread-2032 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-16T17:58:49,357 DEBUG [Thread-2030 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x523a2789 to 127.0.0.1:49190 2024-12-16T17:58:49,357 DEBUG [Thread-2030 {}] ipc.AbstractRpcClient(514): Stopping rpc client 
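The flush activity above (pid=134/135 just finished, pid=136/137 freshly stored) is the master's response to explicit client flush requests: "Client=jenkins//172.17.0.2 flush TestAcidGuarantees" becomes a FlushTableProcedure with one FlushRegionProcedure per region, while HBaseAdmin keeps asking the master whether the procedure is done. A minimal sketch of issuing such a flush is below, assuming the standard HBase 2.x Admin API and the table name from this log.

    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;

    public class FlushTableExample {
      public static void main(String[] args) throws Exception {
        try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create());
             Admin admin = conn.getAdmin()) {
          // Asks the master to flush every region of the table. The master stores a
          // FlushTableProcedure (like pid=134/136 above) and the admin client waits
          // for it to finish before returning.
          admin.flush(TableName.valueOf("TestAcidGuarantees"));
        }
      }
    }

The repeated MasterRpcServices(1305) "Checking to see if procedure is done" entries in this log are that client-side polling as seen from the master.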
2024-12-16T17:58:49,357 DEBUG [Thread-2026 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x0992ece1 to 127.0.0.1:49190 2024-12-16T17:58:49,357 DEBUG [Thread-2026 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-16T17:58:49,397 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38367 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=136 2024-12-16T17:58:49,448 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 3609ad07831c,39733,1734371789085 2024-12-16T17:58:49,449 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=39733 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=137 2024-12-16T17:58:49,450 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-0 {event_type=RS_FLUSH_REGIONS, pid=137}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1734371907068.f14d455ff3b60546f0a651dc8cf12d5c. 2024-12-16T17:58:49,450 INFO [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-0 {event_type=RS_FLUSH_REGIONS, pid=137}] regionserver.HRegion(2837): Flushing f14d455ff3b60546f0a651dc8cf12d5c 3/3 column families, dataSize=53.67 KB heapSize=141.38 KB 2024-12-16T17:58:49,451 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-0 {event_type=RS_FLUSH_REGIONS, pid=137}] regionserver.CompactingMemStore(205): FLUSHING TO DISK f14d455ff3b60546f0a651dc8cf12d5c, store=A 2024-12-16T17:58:49,451 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-0 {event_type=RS_FLUSH_REGIONS, pid=137}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-16T17:58:49,451 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-0 {event_type=RS_FLUSH_REGIONS, pid=137}] regionserver.CompactingMemStore(205): FLUSHING TO DISK f14d455ff3b60546f0a651dc8cf12d5c, store=B 2024-12-16T17:58:49,451 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-0 {event_type=RS_FLUSH_REGIONS, pid=137}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-16T17:58:49,451 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-0 {event_type=RS_FLUSH_REGIONS, pid=137}] regionserver.CompactingMemStore(205): FLUSHING TO DISK f14d455ff3b60546f0a651dc8cf12d5c, store=C 2024-12-16T17:58:49,451 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-0 {event_type=RS_FLUSH_REGIONS, pid=137}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-16T17:58:49,453 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39733 {}] regionserver.HRegion(8581): Flush requested on f14d455ff3b60546f0a651dc8cf12d5c 2024-12-16T17:58:49,454 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1734371907068.f14d455ff3b60546f0a651dc8cf12d5c. 
as already flushing 2024-12-16T17:58:49,454 DEBUG [Thread-2013 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x329b4b28 to 127.0.0.1:49190 2024-12-16T17:58:49,454 DEBUG [Thread-2013 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-16T17:58:49,457 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-0 {event_type=RS_FLUSH_REGIONS, pid=137}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/f14d455ff3b60546f0a651dc8cf12d5c/.tmp/A/6540b2ac5a794fff9436ded3263dc4a1 is 50, key is test_row_0/A:col10/1734371928830/Put/seqid=0 2024-12-16T17:58:49,459 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41817 is added to blk_1073742364_1540 (size=12301) 2024-12-16T17:58:49,599 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38367 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=136 2024-12-16T17:58:49,861 INFO [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-0 {event_type=RS_FLUSH_REGIONS, pid=137}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=17.89 KB at sequenceid=422 (bloomFilter=true), to=hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/f14d455ff3b60546f0a651dc8cf12d5c/.tmp/A/6540b2ac5a794fff9436ded3263dc4a1 2024-12-16T17:58:49,873 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-0 {event_type=RS_FLUSH_REGIONS, pid=137}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/f14d455ff3b60546f0a651dc8cf12d5c/.tmp/B/f618ea080fb148849cf32184ea8b45a4 is 50, key is test_row_0/B:col10/1734371928830/Put/seqid=0 2024-12-16T17:58:49,877 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41817 is added to blk_1073742365_1541 (size=12301) 2024-12-16T17:58:49,878 INFO [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-0 {event_type=RS_FLUSH_REGIONS, pid=137}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=17.89 KB at sequenceid=422 (bloomFilter=true), to=hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/f14d455ff3b60546f0a651dc8cf12d5c/.tmp/B/f618ea080fb148849cf32184ea8b45a4 2024-12-16T17:58:49,884 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-0 {event_type=RS_FLUSH_REGIONS, pid=137}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/f14d455ff3b60546f0a651dc8cf12d5c/.tmp/C/becbe0a2d6ad4434816241354fd68344 is 50, key is test_row_0/C:col10/1734371928830/Put/seqid=0 2024-12-16T17:58:49,888 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41817 is added to blk_1073742366_1542 (size=12301) 2024-12-16T17:58:49,888 INFO [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-0 {event_type=RS_FLUSH_REGIONS, pid=137}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=17.89 KB at sequenceid=422 (bloomFilter=true), to=hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/f14d455ff3b60546f0a651dc8cf12d5c/.tmp/C/becbe0a2d6ad4434816241354fd68344 2024-12-16T17:58:49,892 DEBUG 
[RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-0 {event_type=RS_FLUSH_REGIONS, pid=137}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/f14d455ff3b60546f0a651dc8cf12d5c/.tmp/A/6540b2ac5a794fff9436ded3263dc4a1 as hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/f14d455ff3b60546f0a651dc8cf12d5c/A/6540b2ac5a794fff9436ded3263dc4a1 2024-12-16T17:58:49,896 INFO [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-0 {event_type=RS_FLUSH_REGIONS, pid=137}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/f14d455ff3b60546f0a651dc8cf12d5c/A/6540b2ac5a794fff9436ded3263dc4a1, entries=150, sequenceid=422, filesize=12.0 K 2024-12-16T17:58:49,896 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-0 {event_type=RS_FLUSH_REGIONS, pid=137}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/f14d455ff3b60546f0a651dc8cf12d5c/.tmp/B/f618ea080fb148849cf32184ea8b45a4 as hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/f14d455ff3b60546f0a651dc8cf12d5c/B/f618ea080fb148849cf32184ea8b45a4 2024-12-16T17:58:49,900 INFO [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-0 {event_type=RS_FLUSH_REGIONS, pid=137}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/f14d455ff3b60546f0a651dc8cf12d5c/B/f618ea080fb148849cf32184ea8b45a4, entries=150, sequenceid=422, filesize=12.0 K 2024-12-16T17:58:49,900 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38367 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=136 2024-12-16T17:58:49,900 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-0 {event_type=RS_FLUSH_REGIONS, pid=137}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/f14d455ff3b60546f0a651dc8cf12d5c/.tmp/C/becbe0a2d6ad4434816241354fd68344 as hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/f14d455ff3b60546f0a651dc8cf12d5c/C/becbe0a2d6ad4434816241354fd68344 2024-12-16T17:58:49,904 INFO [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-0 {event_type=RS_FLUSH_REGIONS, pid=137}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/f14d455ff3b60546f0a651dc8cf12d5c/C/becbe0a2d6ad4434816241354fd68344, entries=150, sequenceid=422, filesize=12.0 K 2024-12-16T17:58:49,904 INFO [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-0 {event_type=RS_FLUSH_REGIONS, pid=137}] regionserver.HRegion(3040): Finished flush of dataSize ~53.67 KB/54960, heapSize ~141.33 KB/144720, currentSize=6.71 KB/6870 for f14d455ff3b60546f0a651dc8cf12d5c in 454ms, sequenceid=422, compaction requested=true 2024-12-16T17:58:49,904 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-0 {event_type=RS_FLUSH_REGIONS, pid=137}] regionserver.HRegion(2538): Flush status journal for f14d455ff3b60546f0a651dc8cf12d5c: 2024-12-16T17:58:49,904 DEBUG 
[RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-0 {event_type=RS_FLUSH_REGIONS, pid=137}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1734371907068.f14d455ff3b60546f0a651dc8cf12d5c. 2024-12-16T17:58:49,905 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-0 {event_type=RS_FLUSH_REGIONS, pid=137}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=137 2024-12-16T17:58:49,905 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38367 {}] master.HMaster(4106): Remote procedure done, pid=137 2024-12-16T17:58:49,906 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=137, resume processing ppid=136 2024-12-16T17:58:49,907 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=137, ppid=136, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 608 msec 2024-12-16T17:58:49,908 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=136, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=136, table=TestAcidGuarantees in 612 msec 2024-12-16T17:58:50,402 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38367 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=136 2024-12-16T17:58:50,403 INFO [Thread-2023 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 136 completed 2024-12-16T17:58:50,972 DEBUG [Thread-2015 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x198eb3f8 to 127.0.0.1:49190 2024-12-16T17:58:50,972 DEBUG [Thread-2015 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-16T17:58:50,975 DEBUG [Thread-2021 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x49e527c5 to 127.0.0.1:49190 2024-12-16T17:58:50,975 DEBUG [Thread-2021 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-16T17:58:50,987 DEBUG [Thread-2019 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x53df282d to 127.0.0.1:49190 2024-12-16T17:58:50,987 DEBUG [Thread-2019 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-16T17:58:53,090 DEBUG [Thread-2017 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x339e38e9 to 127.0.0.1:49190 2024-12-16T17:58:53,091 DEBUG [Thread-2017 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-16T17:58:53,091 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(392): Finished test. 
Writers:
2024-12-16T17:58:53,091 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(394): wrote 78
2024-12-16T17:58:53,091 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(394): wrote 69
2024-12-16T17:58:53,091 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(394): wrote 74
2024-12-16T17:58:53,091 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(394): wrote 58
2024-12-16T17:58:53,091 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(394): wrote 57
2024-12-16T17:58:53,091 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(396): Readers:
2024-12-16T17:58:53,091 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(400): Scanners:
2024-12-16T17:58:53,091 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(402): scanned 3809
2024-12-16T17:58:53,091 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(403): verified 11427 rows
2024-12-16T17:58:53,091 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(402): scanned 3809
2024-12-16T17:58:53,091 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(403): verified 11427 rows
2024-12-16T17:58:53,091 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(402): scanned 3780
2024-12-16T17:58:53,092 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(403): verified 11340 rows
2024-12-16T17:58:53,092 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(402): scanned 3779
2024-12-16T17:58:53,092 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(403): verified 11335 rows
2024-12-16T17:58:53,092 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(402): scanned 3786
2024-12-16T17:58:53,092 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(403): verified 11356 rows
2024-12-16T17:58:53,092 INFO [Time-limited test {}] client.ConnectionImplementation(2127): Closing master protocol: MasterService
2024-12-16T17:58:53,092 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x1a5bc453 to 127.0.0.1:49190
2024-12-16T17:58:53,092 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client
2024-12-16T17:58:53,095 INFO [Time-limited test {}] client.HBaseAdmin$18(967): Started disable of TestAcidGuarantees
2024-12-16T17:58:53,095 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38367 {}] master.HMaster$13(2755): Client=jenkins//172.17.0.2 disable TestAcidGuarantees
2024-12-16T17:58:53,096 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38367 {}] procedure2.ProcedureExecutor(1098): Stored pid=138, state=RUNNABLE:DISABLE_TABLE_PREPARE; DisableTableProcedure table=TestAcidGuarantees
2024-12-16T17:58:53,098 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38367 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=138
2024-12-16T17:58:53,099 DEBUG [PEWorker-1 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":1,"row":"TestAcidGuarantees","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1734371933098"}]},"ts":"1734371933098"}
2024-12-16T17:58:53,099 INFO [PEWorker-1 {}] hbase.MetaTableAccessor(1655): Updated tableName=TestAcidGuarantees, state=DISABLING in hbase:meta
2024-12-16T17:58:53,152 INFO [PEWorker-1 {}] procedure.DisableTableProcedure(284): Set TestAcidGuarantees to state=DISABLING
2024-12-16T17:58:53,153 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=139, ppid=138, state=RUNNABLE:CLOSE_TABLE_REGIONS_SCHEDULE; CloseTableRegionsProcedure table=TestAcidGuarantees}]
2024-12-16T17:58:53,156 INFO [PEWorker-3
{}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=140, ppid=139, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE; TransitRegionStateProcedure table=TestAcidGuarantees, region=f14d455ff3b60546f0a651dc8cf12d5c, UNASSIGN}] 2024-12-16T17:58:53,157 INFO [PEWorker-4 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=140, ppid=139, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE; TransitRegionStateProcedure table=TestAcidGuarantees, region=f14d455ff3b60546f0a651dc8cf12d5c, UNASSIGN 2024-12-16T17:58:53,158 INFO [PEWorker-4 {}] assignment.RegionStateStore(202): pid=140 updating hbase:meta row=f14d455ff3b60546f0a651dc8cf12d5c, regionState=CLOSING, regionLocation=3609ad07831c,39733,1734371789085 2024-12-16T17:58:53,159 DEBUG [PEWorker-4 {}] assignment.TransitRegionStateProcedure(338): Close region: isSplit: false: evictOnSplit: true: evictOnClose: false 2024-12-16T17:58:53,159 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=141, ppid=140, state=RUNNABLE; CloseRegionProcedure f14d455ff3b60546f0a651dc8cf12d5c, server=3609ad07831c,39733,1734371789085}] 2024-12-16T17:58:53,200 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38367 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=138 2024-12-16T17:58:53,312 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 3609ad07831c,39733,1734371789085 2024-12-16T17:58:53,313 INFO [RS_CLOSE_REGION-regionserver/3609ad07831c:0-0 {event_type=M_RS_CLOSE_REGION, pid=141}] handler.UnassignRegionHandler(124): Close f14d455ff3b60546f0a651dc8cf12d5c 2024-12-16T17:58:53,313 DEBUG [RS_CLOSE_REGION-regionserver/3609ad07831c:0-0 {event_type=M_RS_CLOSE_REGION, pid=141}] handler.UnassignRegionHandler(138): Unassign region: split region: false: evictCache: false 2024-12-16T17:58:53,313 DEBUG [RS_CLOSE_REGION-regionserver/3609ad07831c:0-0 {event_type=M_RS_CLOSE_REGION, pid=141}] regionserver.HRegion(1681): Closing f14d455ff3b60546f0a651dc8cf12d5c, disabling compactions & flushes 2024-12-16T17:58:53,313 INFO [RS_CLOSE_REGION-regionserver/3609ad07831c:0-0 {event_type=M_RS_CLOSE_REGION, pid=141}] regionserver.HRegion(1703): Closing region TestAcidGuarantees,,1734371907068.f14d455ff3b60546f0a651dc8cf12d5c. 2024-12-16T17:58:53,313 DEBUG [RS_CLOSE_REGION-regionserver/3609ad07831c:0-0 {event_type=M_RS_CLOSE_REGION, pid=141}] regionserver.HRegion(1724): Waiting without time limit for close lock on TestAcidGuarantees,,1734371907068.f14d455ff3b60546f0a651dc8cf12d5c. 2024-12-16T17:58:53,313 DEBUG [RS_CLOSE_REGION-regionserver/3609ad07831c:0-0 {event_type=M_RS_CLOSE_REGION, pid=141}] regionserver.HRegion(1791): Acquired close lock on TestAcidGuarantees,,1734371907068.f14d455ff3b60546f0a651dc8cf12d5c. after waiting 0 ms 2024-12-16T17:58:53,313 DEBUG [RS_CLOSE_REGION-regionserver/3609ad07831c:0-0 {event_type=M_RS_CLOSE_REGION, pid=141}] regionserver.HRegion(1801): Updates disabled for region TestAcidGuarantees,,1734371907068.f14d455ff3b60546f0a651dc8cf12d5c. 
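Procedure pid=138 above is a DisableTableProcedure: the table state is flipped to DISABLING in hbase:meta, a CloseTableRegionsProcedure (pid=139) schedules a TransitRegionStateProcedure UNASSIGN (pid=140), and a CloseRegionProcedure (pid=141) closes f14d455ff3b60546f0a651dc8cf12d5c on the region server, which first disables compactions and flushes and takes the close lock. The client side of this teardown is sketched below, assuming the standard HBase 2.x Admin API; the commented-out deleteTable call is only what a test harness would typically do next and is not shown in this part of the log.

    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;

    public class DisableTableExample {
      public static void main(String[] args) throws Exception {
        TableName table = TableName.valueOf("TestAcidGuarantees");
        try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create());
             Admin admin = conn.getAdmin()) {
          if (admin.isTableEnabled(table)) {
            // Triggers the DisableTableProcedure seen above: hbase:meta is updated to
            // DISABLING and every region of the table is unassigned and closed.
            admin.disableTable(table);
          }
          // A test harness would typically delete the table afterwards (assumption,
          // not shown in this part of the log):
          // admin.deleteTable(table);
        }
      }
    }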
2024-12-16T17:58:53,314 INFO [RS_CLOSE_REGION-regionserver/3609ad07831c:0-0 {event_type=M_RS_CLOSE_REGION, pid=141}] regionserver.HRegion(2837): Flushing f14d455ff3b60546f0a651dc8cf12d5c 3/3 column families, dataSize=33.54 KB heapSize=88.64 KB 2024-12-16T17:58:53,314 DEBUG [RS_CLOSE_REGION-regionserver/3609ad07831c:0-0 {event_type=M_RS_CLOSE_REGION, pid=141}] regionserver.CompactingMemStore(205): FLUSHING TO DISK f14d455ff3b60546f0a651dc8cf12d5c, store=A 2024-12-16T17:58:53,315 DEBUG [RS_CLOSE_REGION-regionserver/3609ad07831c:0-0 {event_type=M_RS_CLOSE_REGION, pid=141}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-16T17:58:53,315 DEBUG [RS_CLOSE_REGION-regionserver/3609ad07831c:0-0 {event_type=M_RS_CLOSE_REGION, pid=141}] regionserver.CompactingMemStore(205): FLUSHING TO DISK f14d455ff3b60546f0a651dc8cf12d5c, store=B 2024-12-16T17:58:53,315 DEBUG [RS_CLOSE_REGION-regionserver/3609ad07831c:0-0 {event_type=M_RS_CLOSE_REGION, pid=141}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-16T17:58:53,315 DEBUG [RS_CLOSE_REGION-regionserver/3609ad07831c:0-0 {event_type=M_RS_CLOSE_REGION, pid=141}] regionserver.CompactingMemStore(205): FLUSHING TO DISK f14d455ff3b60546f0a651dc8cf12d5c, store=C 2024-12-16T17:58:53,315 DEBUG [RS_CLOSE_REGION-regionserver/3609ad07831c:0-0 {event_type=M_RS_CLOSE_REGION, pid=141}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-16T17:58:53,321 DEBUG [RS_CLOSE_REGION-regionserver/3609ad07831c:0-0 {event_type=M_RS_CLOSE_REGION, pid=141}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/f14d455ff3b60546f0a651dc8cf12d5c/.tmp/A/5b8eef3a41c74f758662f5e0557a09a2 is 50, key is test_row_0/A:col10/1734371930969/Put/seqid=0 2024-12-16T17:58:53,325 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41817 is added to blk_1073742367_1543 (size=12301) 2024-12-16T17:58:53,401 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38367 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=138 2024-12-16T17:58:53,703 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38367 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=138 2024-12-16T17:58:53,727 INFO [RS_CLOSE_REGION-regionserver/3609ad07831c:0-0 {event_type=M_RS_CLOSE_REGION, pid=141}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=11.18 KB at sequenceid=430 (bloomFilter=true), to=hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/f14d455ff3b60546f0a651dc8cf12d5c/.tmp/A/5b8eef3a41c74f758662f5e0557a09a2 2024-12-16T17:58:53,738 DEBUG [RS_CLOSE_REGION-regionserver/3609ad07831c:0-0 {event_type=M_RS_CLOSE_REGION, pid=141}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/f14d455ff3b60546f0a651dc8cf12d5c/.tmp/B/14b6c9756f264b8490303217555ab7da is 50, key is test_row_0/B:col10/1734371930969/Put/seqid=0 2024-12-16T17:58:53,741 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41817 is added to blk_1073742368_1544 (size=12301) 2024-12-16T17:58:54,143 INFO [RS_CLOSE_REGION-regionserver/3609ad07831c:0-0 
{event_type=M_RS_CLOSE_REGION, pid=141}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=11.18 KB at sequenceid=430 (bloomFilter=true), to=hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/f14d455ff3b60546f0a651dc8cf12d5c/.tmp/B/14b6c9756f264b8490303217555ab7da 2024-12-16T17:58:54,154 DEBUG [RS_CLOSE_REGION-regionserver/3609ad07831c:0-0 {event_type=M_RS_CLOSE_REGION, pid=141}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/f14d455ff3b60546f0a651dc8cf12d5c/.tmp/C/4b20c4a874054d668c5cdae9bf72dc41 is 50, key is test_row_0/C:col10/1734371930969/Put/seqid=0 2024-12-16T17:58:54,157 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41817 is added to blk_1073742369_1545 (size=12301) 2024-12-16T17:58:54,205 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38367 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=138 2024-12-16T17:58:54,559 INFO [RS_CLOSE_REGION-regionserver/3609ad07831c:0-0 {event_type=M_RS_CLOSE_REGION, pid=141}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=11.18 KB at sequenceid=430 (bloomFilter=true), to=hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/f14d455ff3b60546f0a651dc8cf12d5c/.tmp/C/4b20c4a874054d668c5cdae9bf72dc41 2024-12-16T17:58:54,569 DEBUG [RS_CLOSE_REGION-regionserver/3609ad07831c:0-0 {event_type=M_RS_CLOSE_REGION, pid=141}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/f14d455ff3b60546f0a651dc8cf12d5c/.tmp/A/5b8eef3a41c74f758662f5e0557a09a2 as hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/f14d455ff3b60546f0a651dc8cf12d5c/A/5b8eef3a41c74f758662f5e0557a09a2 2024-12-16T17:58:54,574 INFO [RS_CLOSE_REGION-regionserver/3609ad07831c:0-0 {event_type=M_RS_CLOSE_REGION, pid=141}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/f14d455ff3b60546f0a651dc8cf12d5c/A/5b8eef3a41c74f758662f5e0557a09a2, entries=150, sequenceid=430, filesize=12.0 K 2024-12-16T17:58:54,574 DEBUG [RS_CLOSE_REGION-regionserver/3609ad07831c:0-0 {event_type=M_RS_CLOSE_REGION, pid=141}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/f14d455ff3b60546f0a651dc8cf12d5c/.tmp/B/14b6c9756f264b8490303217555ab7da as hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/f14d455ff3b60546f0a651dc8cf12d5c/B/14b6c9756f264b8490303217555ab7da 2024-12-16T17:58:54,578 INFO [RS_CLOSE_REGION-regionserver/3609ad07831c:0-0 {event_type=M_RS_CLOSE_REGION, pid=141}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/f14d455ff3b60546f0a651dc8cf12d5c/B/14b6c9756f264b8490303217555ab7da, entries=150, sequenceid=430, filesize=12.0 K 2024-12-16T17:58:54,579 DEBUG [RS_CLOSE_REGION-regionserver/3609ad07831c:0-0 {event_type=M_RS_CLOSE_REGION, pid=141}] 
regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/f14d455ff3b60546f0a651dc8cf12d5c/.tmp/C/4b20c4a874054d668c5cdae9bf72dc41 as hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/f14d455ff3b60546f0a651dc8cf12d5c/C/4b20c4a874054d668c5cdae9bf72dc41 2024-12-16T17:58:54,582 INFO [RS_CLOSE_REGION-regionserver/3609ad07831c:0-0 {event_type=M_RS_CLOSE_REGION, pid=141}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/f14d455ff3b60546f0a651dc8cf12d5c/C/4b20c4a874054d668c5cdae9bf72dc41, entries=150, sequenceid=430, filesize=12.0 K 2024-12-16T17:58:54,583 INFO [RS_CLOSE_REGION-regionserver/3609ad07831c:0-0 {event_type=M_RS_CLOSE_REGION, pid=141}] regionserver.HRegion(3040): Finished flush of dataSize ~33.54 KB/34350, heapSize ~88.59 KB/90720, currentSize=0 B/0 for f14d455ff3b60546f0a651dc8cf12d5c in 1270ms, sequenceid=430, compaction requested=true 2024-12-16T17:58:54,584 DEBUG [StoreCloser-TestAcidGuarantees,,1734371907068.f14d455ff3b60546f0a651dc8cf12d5c.-1 {}] regionserver.HStore(2316): Moving the files [hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/f14d455ff3b60546f0a651dc8cf12d5c/A/842193dbac8b42018e5fcf9924469536, hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/f14d455ff3b60546f0a651dc8cf12d5c/A/2b538a89aa2b4dfc988362f0e699d81f, hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/f14d455ff3b60546f0a651dc8cf12d5c/A/c3e8b2af92234f888643b88ca11e8a36, hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/f14d455ff3b60546f0a651dc8cf12d5c/A/23102372950e442f8c5ea323226c4c5c, hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/f14d455ff3b60546f0a651dc8cf12d5c/A/db0b7eec8e4b45c48f2f3cea70858c67, hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/f14d455ff3b60546f0a651dc8cf12d5c/A/8e7da218410644a3b71ac4378cd45751, hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/f14d455ff3b60546f0a651dc8cf12d5c/A/12999b5a21b94f258fe9c5e0a92a57c1, hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/f14d455ff3b60546f0a651dc8cf12d5c/A/ef7c5e301f5f4f6c8d83bbd5c9e416b7, hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/f14d455ff3b60546f0a651dc8cf12d5c/A/6935df28d00e490aadba59790202db4a, hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/f14d455ff3b60546f0a651dc8cf12d5c/A/03a7b5816df64c94b9359fabcf889428, hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/f14d455ff3b60546f0a651dc8cf12d5c/A/3175bad5261e4181baeb3f43c70f9c4d, hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/f14d455ff3b60546f0a651dc8cf12d5c/A/028c7d69a57a4729aed44044fbae59e7, 
hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/f14d455ff3b60546f0a651dc8cf12d5c/A/ca837e5e4a4b4588ab991384e8abd39e, hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/f14d455ff3b60546f0a651dc8cf12d5c/A/a28736933768401da0f3e79b87f68584, hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/f14d455ff3b60546f0a651dc8cf12d5c/A/61ad7bbb4f9644b49d9a61df979b1106, hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/f14d455ff3b60546f0a651dc8cf12d5c/A/7ebfa844a457496e93c5d18da5255bb1, hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/f14d455ff3b60546f0a651dc8cf12d5c/A/84cd7e81f98943a780d44590ffd2ff26, hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/f14d455ff3b60546f0a651dc8cf12d5c/A/047ce9ef1c95414a8cdaeedddd577c9d, hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/f14d455ff3b60546f0a651dc8cf12d5c/A/1090afae0d8146f2928c666616ca2cf8, hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/f14d455ff3b60546f0a651dc8cf12d5c/A/e5c4b657bcb940f29410114a3a9770ff, hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/f14d455ff3b60546f0a651dc8cf12d5c/A/a8593e9aefd04061b4d17fc7b97cdc0f, hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/f14d455ff3b60546f0a651dc8cf12d5c/A/66b4830aac3f491993c3f3568d8766a4, hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/f14d455ff3b60546f0a651dc8cf12d5c/A/1270b36cf6b9446ba16918ef6a9f0be5, hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/f14d455ff3b60546f0a651dc8cf12d5c/A/9e39f8f1e1f04627ae5423d1de418265, hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/f14d455ff3b60546f0a651dc8cf12d5c/A/ed164e21d28b40b18d2d9066c95e053e, hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/f14d455ff3b60546f0a651dc8cf12d5c/A/21f3dd0101634b5c83ce3f9b8f08cf01] to archive 2024-12-16T17:58:54,584 DEBUG [StoreCloser-TestAcidGuarantees,,1734371907068.f14d455ff3b60546f0a651dc8cf12d5c.-1 {}] backup.HFileArchiver(363): Archiving compacted files. 
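During the close, the StoreCloser does not delete the store files it no longer needs; it hands them to backup.HFileArchiver, which moves each file from the table's data directory to the matching path under the cluster-wide archive directory, as the HFileArchiver(620) "Archived from FileableStoreFile" entries that follow show. The sketch below is a hypothetical helper, not HBase's HFileArchiver API; it only reproduces the data-to-archive path mapping visible in these log lines.

    import org.apache.hadoop.fs.Path;

    public class ArchivePathExample {
      /**
       * Mirrors the mapping visible in the surrounding log lines:
       *   <root>/data/default/<table>/<region>/<family>/<hfile>
       * becomes
       *   <root>/archive/data/default/<table>/<region>/<family>/<hfile>
       * Purely illustrative; the real move is performed by backup.HFileArchiver.
       */
      static Path toArchivePath(Path rootDir, Path storeFile) {
        String relative = storeFile.toUri().getPath()
            .substring(rootDir.toUri().getPath().length() + 1); // e.g. data/default/...
        return new Path(new Path(rootDir, "archive"), relative);
      }

      public static void main(String[] args) {
        Path root = new Path(
            "hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4");
        Path storeFile = new Path(root,
            "data/default/TestAcidGuarantees/f14d455ff3b60546f0a651dc8cf12d5c/A/842193dbac8b42018e5fcf9924469536");
        // Prints .../archive/data/default/TestAcidGuarantees/.../A/842193dbac8b42018e5fcf9924469536
        System.out.println(toArchivePath(root, storeFile));
      }
    }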
2024-12-16T17:58:54,587 DEBUG [HFileArchiver-12 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/f14d455ff3b60546f0a651dc8cf12d5c/A/842193dbac8b42018e5fcf9924469536 to hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/archive/data/default/TestAcidGuarantees/f14d455ff3b60546f0a651dc8cf12d5c/A/842193dbac8b42018e5fcf9924469536 2024-12-16T17:58:54,587 DEBUG [HFileArchiver-9 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/f14d455ff3b60546f0a651dc8cf12d5c/A/db0b7eec8e4b45c48f2f3cea70858c67 to hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/archive/data/default/TestAcidGuarantees/f14d455ff3b60546f0a651dc8cf12d5c/A/db0b7eec8e4b45c48f2f3cea70858c67 2024-12-16T17:58:54,587 DEBUG [HFileArchiver-13 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/f14d455ff3b60546f0a651dc8cf12d5c/A/c3e8b2af92234f888643b88ca11e8a36 to hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/archive/data/default/TestAcidGuarantees/f14d455ff3b60546f0a651dc8cf12d5c/A/c3e8b2af92234f888643b88ca11e8a36 2024-12-16T17:58:54,587 DEBUG [HFileArchiver-16 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/f14d455ff3b60546f0a651dc8cf12d5c/A/2b538a89aa2b4dfc988362f0e699d81f to hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/archive/data/default/TestAcidGuarantees/f14d455ff3b60546f0a651dc8cf12d5c/A/2b538a89aa2b4dfc988362f0e699d81f 2024-12-16T17:58:54,587 DEBUG [HFileArchiver-14 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/f14d455ff3b60546f0a651dc8cf12d5c/A/8e7da218410644a3b71ac4378cd45751 to hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/archive/data/default/TestAcidGuarantees/f14d455ff3b60546f0a651dc8cf12d5c/A/8e7da218410644a3b71ac4378cd45751 2024-12-16T17:58:54,587 DEBUG [HFileArchiver-11 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/f14d455ff3b60546f0a651dc8cf12d5c/A/23102372950e442f8c5ea323226c4c5c to hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/archive/data/default/TestAcidGuarantees/f14d455ff3b60546f0a651dc8cf12d5c/A/23102372950e442f8c5ea323226c4c5c 2024-12-16T17:58:54,587 DEBUG [HFileArchiver-15 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/f14d455ff3b60546f0a651dc8cf12d5c/A/ef7c5e301f5f4f6c8d83bbd5c9e416b7 to hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/archive/data/default/TestAcidGuarantees/f14d455ff3b60546f0a651dc8cf12d5c/A/ef7c5e301f5f4f6c8d83bbd5c9e416b7 2024-12-16T17:58:54,587 DEBUG [HFileArchiver-10 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, 
hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/f14d455ff3b60546f0a651dc8cf12d5c/A/12999b5a21b94f258fe9c5e0a92a57c1 to hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/archive/data/default/TestAcidGuarantees/f14d455ff3b60546f0a651dc8cf12d5c/A/12999b5a21b94f258fe9c5e0a92a57c1 2024-12-16T17:58:54,589 DEBUG [HFileArchiver-12 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/f14d455ff3b60546f0a651dc8cf12d5c/A/6935df28d00e490aadba59790202db4a to hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/archive/data/default/TestAcidGuarantees/f14d455ff3b60546f0a651dc8cf12d5c/A/6935df28d00e490aadba59790202db4a 2024-12-16T17:58:54,589 DEBUG [HFileArchiver-9 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/f14d455ff3b60546f0a651dc8cf12d5c/A/03a7b5816df64c94b9359fabcf889428 to hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/archive/data/default/TestAcidGuarantees/f14d455ff3b60546f0a651dc8cf12d5c/A/03a7b5816df64c94b9359fabcf889428 2024-12-16T17:58:54,589 DEBUG [HFileArchiver-16 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/f14d455ff3b60546f0a651dc8cf12d5c/A/028c7d69a57a4729aed44044fbae59e7 to hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/archive/data/default/TestAcidGuarantees/f14d455ff3b60546f0a651dc8cf12d5c/A/028c7d69a57a4729aed44044fbae59e7 2024-12-16T17:58:54,589 DEBUG [HFileArchiver-15 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/f14d455ff3b60546f0a651dc8cf12d5c/A/61ad7bbb4f9644b49d9a61df979b1106 to hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/archive/data/default/TestAcidGuarantees/f14d455ff3b60546f0a651dc8cf12d5c/A/61ad7bbb4f9644b49d9a61df979b1106 2024-12-16T17:58:54,589 DEBUG [HFileArchiver-14 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/f14d455ff3b60546f0a651dc8cf12d5c/A/ca837e5e4a4b4588ab991384e8abd39e to hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/archive/data/default/TestAcidGuarantees/f14d455ff3b60546f0a651dc8cf12d5c/A/ca837e5e4a4b4588ab991384e8abd39e 2024-12-16T17:58:54,589 DEBUG [HFileArchiver-11 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/f14d455ff3b60546f0a651dc8cf12d5c/A/a28736933768401da0f3e79b87f68584 to hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/archive/data/default/TestAcidGuarantees/f14d455ff3b60546f0a651dc8cf12d5c/A/a28736933768401da0f3e79b87f68584 2024-12-16T17:58:54,589 DEBUG [HFileArchiver-10 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, 
hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/f14d455ff3b60546f0a651dc8cf12d5c/A/7ebfa844a457496e93c5d18da5255bb1 to hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/archive/data/default/TestAcidGuarantees/f14d455ff3b60546f0a651dc8cf12d5c/A/7ebfa844a457496e93c5d18da5255bb1 2024-12-16T17:58:54,589 DEBUG [HFileArchiver-13 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/f14d455ff3b60546f0a651dc8cf12d5c/A/3175bad5261e4181baeb3f43c70f9c4d to hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/archive/data/default/TestAcidGuarantees/f14d455ff3b60546f0a651dc8cf12d5c/A/3175bad5261e4181baeb3f43c70f9c4d 2024-12-16T17:58:54,590 DEBUG [HFileArchiver-12 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/f14d455ff3b60546f0a651dc8cf12d5c/A/84cd7e81f98943a780d44590ffd2ff26 to hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/archive/data/default/TestAcidGuarantees/f14d455ff3b60546f0a651dc8cf12d5c/A/84cd7e81f98943a780d44590ffd2ff26 2024-12-16T17:58:54,591 DEBUG [HFileArchiver-15 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/f14d455ff3b60546f0a651dc8cf12d5c/A/e5c4b657bcb940f29410114a3a9770ff to hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/archive/data/default/TestAcidGuarantees/f14d455ff3b60546f0a651dc8cf12d5c/A/e5c4b657bcb940f29410114a3a9770ff 2024-12-16T17:58:54,591 DEBUG [HFileArchiver-11 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/f14d455ff3b60546f0a651dc8cf12d5c/A/66b4830aac3f491993c3f3568d8766a4 to hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/archive/data/default/TestAcidGuarantees/f14d455ff3b60546f0a651dc8cf12d5c/A/66b4830aac3f491993c3f3568d8766a4 2024-12-16T17:58:54,591 DEBUG [HFileArchiver-16 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/f14d455ff3b60546f0a651dc8cf12d5c/A/1090afae0d8146f2928c666616ca2cf8 to hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/archive/data/default/TestAcidGuarantees/f14d455ff3b60546f0a651dc8cf12d5c/A/1090afae0d8146f2928c666616ca2cf8 2024-12-16T17:58:54,591 DEBUG [HFileArchiver-9 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/f14d455ff3b60546f0a651dc8cf12d5c/A/047ce9ef1c95414a8cdaeedddd577c9d to hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/archive/data/default/TestAcidGuarantees/f14d455ff3b60546f0a651dc8cf12d5c/A/047ce9ef1c95414a8cdaeedddd577c9d 2024-12-16T17:58:54,591 DEBUG [HFileArchiver-14 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, 
hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/f14d455ff3b60546f0a651dc8cf12d5c/A/a8593e9aefd04061b4d17fc7b97cdc0f to hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/archive/data/default/TestAcidGuarantees/f14d455ff3b60546f0a651dc8cf12d5c/A/a8593e9aefd04061b4d17fc7b97cdc0f 2024-12-16T17:58:54,591 DEBUG [HFileArchiver-13 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/f14d455ff3b60546f0a651dc8cf12d5c/A/9e39f8f1e1f04627ae5423d1de418265 to hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/archive/data/default/TestAcidGuarantees/f14d455ff3b60546f0a651dc8cf12d5c/A/9e39f8f1e1f04627ae5423d1de418265 2024-12-16T17:58:54,592 DEBUG [HFileArchiver-10 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/f14d455ff3b60546f0a651dc8cf12d5c/A/1270b36cf6b9446ba16918ef6a9f0be5 to hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/archive/data/default/TestAcidGuarantees/f14d455ff3b60546f0a651dc8cf12d5c/A/1270b36cf6b9446ba16918ef6a9f0be5 2024-12-16T17:58:54,592 DEBUG [HFileArchiver-12 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/f14d455ff3b60546f0a651dc8cf12d5c/A/ed164e21d28b40b18d2d9066c95e053e to hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/archive/data/default/TestAcidGuarantees/f14d455ff3b60546f0a651dc8cf12d5c/A/ed164e21d28b40b18d2d9066c95e053e 2024-12-16T17:58:54,592 DEBUG [HFileArchiver-15 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/f14d455ff3b60546f0a651dc8cf12d5c/A/21f3dd0101634b5c83ce3f9b8f08cf01 to hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/archive/data/default/TestAcidGuarantees/f14d455ff3b60546f0a651dc8cf12d5c/A/21f3dd0101634b5c83ce3f9b8f08cf01 2024-12-16T17:58:54,594 DEBUG [StoreCloser-TestAcidGuarantees,,1734371907068.f14d455ff3b60546f0a651dc8cf12d5c.-1 {}] regionserver.HStore(2316): Moving the files [hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/f14d455ff3b60546f0a651dc8cf12d5c/B/cf8991069ad04d12b36cfa5c2cf08c15, hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/f14d455ff3b60546f0a651dc8cf12d5c/B/9808bc9bd8024435b3b678911af5d4b0, hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/f14d455ff3b60546f0a651dc8cf12d5c/B/6c35945ff87f4e058f7617e7088d9bfb, hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/f14d455ff3b60546f0a651dc8cf12d5c/B/6f81c30f127d4c21a87aec9ce9e33636, hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/f14d455ff3b60546f0a651dc8cf12d5c/B/9f5312e09b894d17bf196375057a0edb, 
hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/f14d455ff3b60546f0a651dc8cf12d5c/B/e328a284fdce4a9f85219a6d87165bbe, hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/f14d455ff3b60546f0a651dc8cf12d5c/B/389c1f1159c54c96827698c05c99ef58, hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/f14d455ff3b60546f0a651dc8cf12d5c/B/d6c6e52a8cac4df78be947ad31f240a7, hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/f14d455ff3b60546f0a651dc8cf12d5c/B/cc5b7c5119184ecea19d1f5cb70d4487, hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/f14d455ff3b60546f0a651dc8cf12d5c/B/e7c8c35209ed4199881958f1b9e0d8f7, hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/f14d455ff3b60546f0a651dc8cf12d5c/B/48335edbd9514f6eb1f131d99c2dab23, hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/f14d455ff3b60546f0a651dc8cf12d5c/B/b8481288c079486f82d2e82a057bdeb1, hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/f14d455ff3b60546f0a651dc8cf12d5c/B/effeceb4c20f4119ba8e1b0e5760b134, hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/f14d455ff3b60546f0a651dc8cf12d5c/B/c9db8f2ff86e4e9c9f0b8f0639248c1c, hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/f14d455ff3b60546f0a651dc8cf12d5c/B/0b84b959c2df4d8d84caddd51a132cfc, hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/f14d455ff3b60546f0a651dc8cf12d5c/B/bdcd0f2d072e4ae68bf93acec1862e0e, hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/f14d455ff3b60546f0a651dc8cf12d5c/B/37e903c14b124434975c24d40787af4f, hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/f14d455ff3b60546f0a651dc8cf12d5c/B/050c0b721ab7460e9930532f93e6429f, hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/f14d455ff3b60546f0a651dc8cf12d5c/B/1d618a3004ab43ef9ca320235325eec4, hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/f14d455ff3b60546f0a651dc8cf12d5c/B/87bda6a8c10b44bebe17b914aa84c49f, hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/f14d455ff3b60546f0a651dc8cf12d5c/B/bc6feb47c7874f978b7613d59e4ac684, hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/f14d455ff3b60546f0a651dc8cf12d5c/B/343a9b076d724e87bcba2be581ed02be, hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/f14d455ff3b60546f0a651dc8cf12d5c/B/e6b63d52fd164194be39de80cc15fbae, hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/f14d455ff3b60546f0a651dc8cf12d5c/B/89993d51fc5d4936864381238aab5e3c, 
hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/f14d455ff3b60546f0a651dc8cf12d5c/B/740efa9e595348f1bdd3e1d81dd33b26, hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/f14d455ff3b60546f0a651dc8cf12d5c/B/85077b1238da44f5adfc717bfb4b6db8] to archive 2024-12-16T17:58:54,594 DEBUG [StoreCloser-TestAcidGuarantees,,1734371907068.f14d455ff3b60546f0a651dc8cf12d5c.-1 {}] backup.HFileArchiver(363): Archiving compacted files. 2024-12-16T17:58:54,596 DEBUG [HFileArchiver-11 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/f14d455ff3b60546f0a651dc8cf12d5c/B/cf8991069ad04d12b36cfa5c2cf08c15 to hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/archive/data/default/TestAcidGuarantees/f14d455ff3b60546f0a651dc8cf12d5c/B/cf8991069ad04d12b36cfa5c2cf08c15 2024-12-16T17:58:54,596 DEBUG [HFileArchiver-9 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/f14d455ff3b60546f0a651dc8cf12d5c/B/6c35945ff87f4e058f7617e7088d9bfb to hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/archive/data/default/TestAcidGuarantees/f14d455ff3b60546f0a651dc8cf12d5c/B/6c35945ff87f4e058f7617e7088d9bfb 2024-12-16T17:58:54,596 DEBUG [HFileArchiver-16 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/f14d455ff3b60546f0a651dc8cf12d5c/B/9808bc9bd8024435b3b678911af5d4b0 to hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/archive/data/default/TestAcidGuarantees/f14d455ff3b60546f0a651dc8cf12d5c/B/9808bc9bd8024435b3b678911af5d4b0 2024-12-16T17:58:54,596 DEBUG [HFileArchiver-13 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/f14d455ff3b60546f0a651dc8cf12d5c/B/9f5312e09b894d17bf196375057a0edb to hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/archive/data/default/TestAcidGuarantees/f14d455ff3b60546f0a651dc8cf12d5c/B/9f5312e09b894d17bf196375057a0edb 2024-12-16T17:58:54,596 DEBUG [HFileArchiver-14 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/f14d455ff3b60546f0a651dc8cf12d5c/B/6f81c30f127d4c21a87aec9ce9e33636 to hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/archive/data/default/TestAcidGuarantees/f14d455ff3b60546f0a651dc8cf12d5c/B/6f81c30f127d4c21a87aec9ce9e33636 2024-12-16T17:58:54,596 DEBUG [HFileArchiver-10 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/f14d455ff3b60546f0a651dc8cf12d5c/B/e328a284fdce4a9f85219a6d87165bbe to hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/archive/data/default/TestAcidGuarantees/f14d455ff3b60546f0a651dc8cf12d5c/B/e328a284fdce4a9f85219a6d87165bbe 2024-12-16T17:58:54,596 DEBUG [HFileArchiver-15 {}] 
backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/f14d455ff3b60546f0a651dc8cf12d5c/B/d6c6e52a8cac4df78be947ad31f240a7 to hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/archive/data/default/TestAcidGuarantees/f14d455ff3b60546f0a651dc8cf12d5c/B/d6c6e52a8cac4df78be947ad31f240a7 2024-12-16T17:58:54,596 DEBUG [HFileArchiver-12 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/f14d455ff3b60546f0a651dc8cf12d5c/B/389c1f1159c54c96827698c05c99ef58 to hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/archive/data/default/TestAcidGuarantees/f14d455ff3b60546f0a651dc8cf12d5c/B/389c1f1159c54c96827698c05c99ef58 2024-12-16T17:58:54,597 DEBUG [HFileArchiver-11 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/f14d455ff3b60546f0a651dc8cf12d5c/B/cc5b7c5119184ecea19d1f5cb70d4487 to hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/archive/data/default/TestAcidGuarantees/f14d455ff3b60546f0a651dc8cf12d5c/B/cc5b7c5119184ecea19d1f5cb70d4487 2024-12-16T17:58:54,597 DEBUG [HFileArchiver-9 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/f14d455ff3b60546f0a651dc8cf12d5c/B/e7c8c35209ed4199881958f1b9e0d8f7 to hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/archive/data/default/TestAcidGuarantees/f14d455ff3b60546f0a651dc8cf12d5c/B/e7c8c35209ed4199881958f1b9e0d8f7 2024-12-16T17:58:54,597 DEBUG [HFileArchiver-16 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/f14d455ff3b60546f0a651dc8cf12d5c/B/48335edbd9514f6eb1f131d99c2dab23 to hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/archive/data/default/TestAcidGuarantees/f14d455ff3b60546f0a651dc8cf12d5c/B/48335edbd9514f6eb1f131d99c2dab23 2024-12-16T17:58:54,597 DEBUG [HFileArchiver-14 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/f14d455ff3b60546f0a651dc8cf12d5c/B/effeceb4c20f4119ba8e1b0e5760b134 to hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/archive/data/default/TestAcidGuarantees/f14d455ff3b60546f0a651dc8cf12d5c/B/effeceb4c20f4119ba8e1b0e5760b134 2024-12-16T17:58:54,597 DEBUG [HFileArchiver-15 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/f14d455ff3b60546f0a651dc8cf12d5c/B/0b84b959c2df4d8d84caddd51a132cfc to hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/archive/data/default/TestAcidGuarantees/f14d455ff3b60546f0a651dc8cf12d5c/B/0b84b959c2df4d8d84caddd51a132cfc 2024-12-16T17:58:54,597 DEBUG [HFileArchiver-13 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, 
hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/f14d455ff3b60546f0a651dc8cf12d5c/B/b8481288c079486f82d2e82a057bdeb1 to hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/archive/data/default/TestAcidGuarantees/f14d455ff3b60546f0a651dc8cf12d5c/B/b8481288c079486f82d2e82a057bdeb1 2024-12-16T17:58:54,597 DEBUG [HFileArchiver-10 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/f14d455ff3b60546f0a651dc8cf12d5c/B/c9db8f2ff86e4e9c9f0b8f0639248c1c to hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/archive/data/default/TestAcidGuarantees/f14d455ff3b60546f0a651dc8cf12d5c/B/c9db8f2ff86e4e9c9f0b8f0639248c1c 2024-12-16T17:58:54,598 DEBUG [HFileArchiver-12 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/f14d455ff3b60546f0a651dc8cf12d5c/B/bdcd0f2d072e4ae68bf93acec1862e0e to hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/archive/data/default/TestAcidGuarantees/f14d455ff3b60546f0a651dc8cf12d5c/B/bdcd0f2d072e4ae68bf93acec1862e0e 2024-12-16T17:58:54,598 DEBUG [HFileArchiver-16 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/f14d455ff3b60546f0a651dc8cf12d5c/B/1d618a3004ab43ef9ca320235325eec4 to hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/archive/data/default/TestAcidGuarantees/f14d455ff3b60546f0a651dc8cf12d5c/B/1d618a3004ab43ef9ca320235325eec4 2024-12-16T17:58:54,598 DEBUG [HFileArchiver-9 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/f14d455ff3b60546f0a651dc8cf12d5c/B/050c0b721ab7460e9930532f93e6429f to hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/archive/data/default/TestAcidGuarantees/f14d455ff3b60546f0a651dc8cf12d5c/B/050c0b721ab7460e9930532f93e6429f 2024-12-16T17:58:54,598 DEBUG [HFileArchiver-11 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/f14d455ff3b60546f0a651dc8cf12d5c/B/37e903c14b124434975c24d40787af4f to hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/archive/data/default/TestAcidGuarantees/f14d455ff3b60546f0a651dc8cf12d5c/B/37e903c14b124434975c24d40787af4f 2024-12-16T17:58:54,598 DEBUG [HFileArchiver-14 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/f14d455ff3b60546f0a651dc8cf12d5c/B/87bda6a8c10b44bebe17b914aa84c49f to hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/archive/data/default/TestAcidGuarantees/f14d455ff3b60546f0a651dc8cf12d5c/B/87bda6a8c10b44bebe17b914aa84c49f 2024-12-16T17:58:54,598 DEBUG [HFileArchiver-15 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, 
hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/f14d455ff3b60546f0a651dc8cf12d5c/B/bc6feb47c7874f978b7613d59e4ac684 to hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/archive/data/default/TestAcidGuarantees/f14d455ff3b60546f0a651dc8cf12d5c/B/bc6feb47c7874f978b7613d59e4ac684 2024-12-16T17:58:54,598 DEBUG [HFileArchiver-13 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/f14d455ff3b60546f0a651dc8cf12d5c/B/343a9b076d724e87bcba2be581ed02be to hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/archive/data/default/TestAcidGuarantees/f14d455ff3b60546f0a651dc8cf12d5c/B/343a9b076d724e87bcba2be581ed02be 2024-12-16T17:58:54,598 DEBUG [HFileArchiver-10 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/f14d455ff3b60546f0a651dc8cf12d5c/B/e6b63d52fd164194be39de80cc15fbae to hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/archive/data/default/TestAcidGuarantees/f14d455ff3b60546f0a651dc8cf12d5c/B/e6b63d52fd164194be39de80cc15fbae 2024-12-16T17:58:54,599 DEBUG [HFileArchiver-12 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/f14d455ff3b60546f0a651dc8cf12d5c/B/89993d51fc5d4936864381238aab5e3c to hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/archive/data/default/TestAcidGuarantees/f14d455ff3b60546f0a651dc8cf12d5c/B/89993d51fc5d4936864381238aab5e3c 2024-12-16T17:58:54,599 DEBUG [HFileArchiver-16 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/f14d455ff3b60546f0a651dc8cf12d5c/B/740efa9e595348f1bdd3e1d81dd33b26 to hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/archive/data/default/TestAcidGuarantees/f14d455ff3b60546f0a651dc8cf12d5c/B/740efa9e595348f1bdd3e1d81dd33b26 2024-12-16T17:58:54,599 DEBUG [HFileArchiver-9 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/f14d455ff3b60546f0a651dc8cf12d5c/B/85077b1238da44f5adfc717bfb4b6db8 to hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/archive/data/default/TestAcidGuarantees/f14d455ff3b60546f0a651dc8cf12d5c/B/85077b1238da44f5adfc717bfb4b6db8 2024-12-16T17:58:54,600 DEBUG [StoreCloser-TestAcidGuarantees,,1734371907068.f14d455ff3b60546f0a651dc8cf12d5c.-1 {}] regionserver.HStore(2316): Moving the files [hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/f14d455ff3b60546f0a651dc8cf12d5c/C/52a65fa7bce146b685e460f3821bcf78, hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/f14d455ff3b60546f0a651dc8cf12d5c/C/91c5d4a349f142c39f850ee27741357f, hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/f14d455ff3b60546f0a651dc8cf12d5c/C/f643e6aee8714504afdd7ab73d31e955, 
hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/f14d455ff3b60546f0a651dc8cf12d5c/C/5098bc816b524042a7de236ea05f4b2e, hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/f14d455ff3b60546f0a651dc8cf12d5c/C/db4748bda3b44426b922300557d646b9, hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/f14d455ff3b60546f0a651dc8cf12d5c/C/ada8348084074b93a2f92f8270e1dcbe, hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/f14d455ff3b60546f0a651dc8cf12d5c/C/6c076be5ba374c5f8e7f7df3e1cb4578, hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/f14d455ff3b60546f0a651dc8cf12d5c/C/b75c417e400c4050baa5182da49042dc, hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/f14d455ff3b60546f0a651dc8cf12d5c/C/7c896520f2194ecd9942c205fca8009b, hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/f14d455ff3b60546f0a651dc8cf12d5c/C/e31958c7a9c946b295d968d09ae561c3, hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/f14d455ff3b60546f0a651dc8cf12d5c/C/a0753d32d8484fdf9da23ceff39a024c, hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/f14d455ff3b60546f0a651dc8cf12d5c/C/b62ecf0392b247199939e0d351e30dae, hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/f14d455ff3b60546f0a651dc8cf12d5c/C/80841d2885d44c1bab067a3bc285eb0a, hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/f14d455ff3b60546f0a651dc8cf12d5c/C/54a8673d4bd34099be50dbec95e6d58d, hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/f14d455ff3b60546f0a651dc8cf12d5c/C/75eebd318803401cb7fc12c9f9f59530, hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/f14d455ff3b60546f0a651dc8cf12d5c/C/94295c363c1b4631a5f7e068b440af1c, hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/f14d455ff3b60546f0a651dc8cf12d5c/C/c169b94adbf34c72b5f51bebc0c0c450, hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/f14d455ff3b60546f0a651dc8cf12d5c/C/1f6978e9ef0a48058b453379d6fc4b1b, hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/f14d455ff3b60546f0a651dc8cf12d5c/C/3933beb632474f059457e6c6943c0c52, hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/f14d455ff3b60546f0a651dc8cf12d5c/C/e944ccb291a248f695c61f3b29f87a96, hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/f14d455ff3b60546f0a651dc8cf12d5c/C/3d4a0dc011ea40c9aac03d90c8d4fb3b, hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/f14d455ff3b60546f0a651dc8cf12d5c/C/f8e1dbb4f81644d49a8a3d3d16f214b9, 
hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/f14d455ff3b60546f0a651dc8cf12d5c/C/f9b065df24cc4a0dbb4b1dab90fa9126, hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/f14d455ff3b60546f0a651dc8cf12d5c/C/3e83c066be8941ba93652fb62b5ec4c7, hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/f14d455ff3b60546f0a651dc8cf12d5c/C/d9c1c0b9f2054749b066ec4aefa4f7ca, hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/f14d455ff3b60546f0a651dc8cf12d5c/C/087c8bcfadee46b0b661e9a0138fee74] to archive 2024-12-16T17:58:54,600 DEBUG [StoreCloser-TestAcidGuarantees,,1734371907068.f14d455ff3b60546f0a651dc8cf12d5c.-1 {}] backup.HFileArchiver(363): Archiving compacted files. 2024-12-16T17:58:54,601 DEBUG [HFileArchiver-11 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/f14d455ff3b60546f0a651dc8cf12d5c/C/52a65fa7bce146b685e460f3821bcf78 to hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/archive/data/default/TestAcidGuarantees/f14d455ff3b60546f0a651dc8cf12d5c/C/52a65fa7bce146b685e460f3821bcf78 2024-12-16T17:58:54,601 DEBUG [HFileArchiver-15 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/f14d455ff3b60546f0a651dc8cf12d5c/C/f643e6aee8714504afdd7ab73d31e955 to hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/archive/data/default/TestAcidGuarantees/f14d455ff3b60546f0a651dc8cf12d5c/C/f643e6aee8714504afdd7ab73d31e955 2024-12-16T17:58:54,601 DEBUG [HFileArchiver-14 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/f14d455ff3b60546f0a651dc8cf12d5c/C/91c5d4a349f142c39f850ee27741357f to hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/archive/data/default/TestAcidGuarantees/f14d455ff3b60546f0a651dc8cf12d5c/C/91c5d4a349f142c39f850ee27741357f 2024-12-16T17:58:54,602 DEBUG [HFileArchiver-10 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/f14d455ff3b60546f0a651dc8cf12d5c/C/db4748bda3b44426b922300557d646b9 to hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/archive/data/default/TestAcidGuarantees/f14d455ff3b60546f0a651dc8cf12d5c/C/db4748bda3b44426b922300557d646b9 2024-12-16T17:58:54,602 DEBUG [HFileArchiver-12 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/f14d455ff3b60546f0a651dc8cf12d5c/C/ada8348084074b93a2f92f8270e1dcbe to hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/archive/data/default/TestAcidGuarantees/f14d455ff3b60546f0a651dc8cf12d5c/C/ada8348084074b93a2f92f8270e1dcbe 2024-12-16T17:58:54,602 DEBUG [HFileArchiver-16 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, 
hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/f14d455ff3b60546f0a651dc8cf12d5c/C/6c076be5ba374c5f8e7f7df3e1cb4578 to hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/archive/data/default/TestAcidGuarantees/f14d455ff3b60546f0a651dc8cf12d5c/C/6c076be5ba374c5f8e7f7df3e1cb4578 2024-12-16T17:58:54,602 DEBUG [HFileArchiver-9 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/f14d455ff3b60546f0a651dc8cf12d5c/C/b75c417e400c4050baa5182da49042dc to hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/archive/data/default/TestAcidGuarantees/f14d455ff3b60546f0a651dc8cf12d5c/C/b75c417e400c4050baa5182da49042dc 2024-12-16T17:58:54,602 DEBUG [HFileArchiver-13 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/f14d455ff3b60546f0a651dc8cf12d5c/C/5098bc816b524042a7de236ea05f4b2e to hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/archive/data/default/TestAcidGuarantees/f14d455ff3b60546f0a651dc8cf12d5c/C/5098bc816b524042a7de236ea05f4b2e 2024-12-16T17:58:54,602 DEBUG [HFileArchiver-14 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/f14d455ff3b60546f0a651dc8cf12d5c/C/a0753d32d8484fdf9da23ceff39a024c to hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/archive/data/default/TestAcidGuarantees/f14d455ff3b60546f0a651dc8cf12d5c/C/a0753d32d8484fdf9da23ceff39a024c 2024-12-16T17:58:54,602 DEBUG [HFileArchiver-11 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/f14d455ff3b60546f0a651dc8cf12d5c/C/7c896520f2194ecd9942c205fca8009b to hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/archive/data/default/TestAcidGuarantees/f14d455ff3b60546f0a651dc8cf12d5c/C/7c896520f2194ecd9942c205fca8009b 2024-12-16T17:58:54,603 DEBUG [HFileArchiver-15 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/f14d455ff3b60546f0a651dc8cf12d5c/C/e31958c7a9c946b295d968d09ae561c3 to hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/archive/data/default/TestAcidGuarantees/f14d455ff3b60546f0a651dc8cf12d5c/C/e31958c7a9c946b295d968d09ae561c3 2024-12-16T17:58:54,603 DEBUG [HFileArchiver-12 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/f14d455ff3b60546f0a651dc8cf12d5c/C/80841d2885d44c1bab067a3bc285eb0a to hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/archive/data/default/TestAcidGuarantees/f14d455ff3b60546f0a651dc8cf12d5c/C/80841d2885d44c1bab067a3bc285eb0a 2024-12-16T17:58:54,603 DEBUG [HFileArchiver-10 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, 
hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/f14d455ff3b60546f0a651dc8cf12d5c/C/b62ecf0392b247199939e0d351e30dae to hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/archive/data/default/TestAcidGuarantees/f14d455ff3b60546f0a651dc8cf12d5c/C/b62ecf0392b247199939e0d351e30dae 2024-12-16T17:58:54,603 DEBUG [HFileArchiver-16 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/f14d455ff3b60546f0a651dc8cf12d5c/C/54a8673d4bd34099be50dbec95e6d58d to hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/archive/data/default/TestAcidGuarantees/f14d455ff3b60546f0a651dc8cf12d5c/C/54a8673d4bd34099be50dbec95e6d58d 2024-12-16T17:58:54,603 DEBUG [HFileArchiver-9 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/f14d455ff3b60546f0a651dc8cf12d5c/C/75eebd318803401cb7fc12c9f9f59530 to hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/archive/data/default/TestAcidGuarantees/f14d455ff3b60546f0a651dc8cf12d5c/C/75eebd318803401cb7fc12c9f9f59530 2024-12-16T17:58:54,603 DEBUG [HFileArchiver-13 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/f14d455ff3b60546f0a651dc8cf12d5c/C/94295c363c1b4631a5f7e068b440af1c to hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/archive/data/default/TestAcidGuarantees/f14d455ff3b60546f0a651dc8cf12d5c/C/94295c363c1b4631a5f7e068b440af1c 2024-12-16T17:58:54,604 DEBUG [HFileArchiver-14 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/f14d455ff3b60546f0a651dc8cf12d5c/C/c169b94adbf34c72b5f51bebc0c0c450 to hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/archive/data/default/TestAcidGuarantees/f14d455ff3b60546f0a651dc8cf12d5c/C/c169b94adbf34c72b5f51bebc0c0c450 2024-12-16T17:58:54,604 DEBUG [HFileArchiver-11 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/f14d455ff3b60546f0a651dc8cf12d5c/C/1f6978e9ef0a48058b453379d6fc4b1b to hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/archive/data/default/TestAcidGuarantees/f14d455ff3b60546f0a651dc8cf12d5c/C/1f6978e9ef0a48058b453379d6fc4b1b 2024-12-16T17:58:54,604 DEBUG [HFileArchiver-12 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/f14d455ff3b60546f0a651dc8cf12d5c/C/e944ccb291a248f695c61f3b29f87a96 to hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/archive/data/default/TestAcidGuarantees/f14d455ff3b60546f0a651dc8cf12d5c/C/e944ccb291a248f695c61f3b29f87a96 2024-12-16T17:58:54,604 DEBUG [HFileArchiver-15 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, 
hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/f14d455ff3b60546f0a651dc8cf12d5c/C/3933beb632474f059457e6c6943c0c52 to hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/archive/data/default/TestAcidGuarantees/f14d455ff3b60546f0a651dc8cf12d5c/C/3933beb632474f059457e6c6943c0c52 2024-12-16T17:58:54,604 DEBUG [HFileArchiver-10 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/f14d455ff3b60546f0a651dc8cf12d5c/C/3d4a0dc011ea40c9aac03d90c8d4fb3b to hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/archive/data/default/TestAcidGuarantees/f14d455ff3b60546f0a651dc8cf12d5c/C/3d4a0dc011ea40c9aac03d90c8d4fb3b 2024-12-16T17:58:54,604 DEBUG [HFileArchiver-9 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/f14d455ff3b60546f0a651dc8cf12d5c/C/f9b065df24cc4a0dbb4b1dab90fa9126 to hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/archive/data/default/TestAcidGuarantees/f14d455ff3b60546f0a651dc8cf12d5c/C/f9b065df24cc4a0dbb4b1dab90fa9126 2024-12-16T17:58:54,604 DEBUG [HFileArchiver-16 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/f14d455ff3b60546f0a651dc8cf12d5c/C/f8e1dbb4f81644d49a8a3d3d16f214b9 to hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/archive/data/default/TestAcidGuarantees/f14d455ff3b60546f0a651dc8cf12d5c/C/f8e1dbb4f81644d49a8a3d3d16f214b9 2024-12-16T17:58:54,604 DEBUG [HFileArchiver-13 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/f14d455ff3b60546f0a651dc8cf12d5c/C/3e83c066be8941ba93652fb62b5ec4c7 to hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/archive/data/default/TestAcidGuarantees/f14d455ff3b60546f0a651dc8cf12d5c/C/3e83c066be8941ba93652fb62b5ec4c7 2024-12-16T17:58:54,605 DEBUG [HFileArchiver-11 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/f14d455ff3b60546f0a651dc8cf12d5c/C/087c8bcfadee46b0b661e9a0138fee74 to hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/archive/data/default/TestAcidGuarantees/f14d455ff3b60546f0a651dc8cf12d5c/C/087c8bcfadee46b0b661e9a0138fee74 2024-12-16T17:58:54,605 DEBUG [HFileArchiver-14 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/f14d455ff3b60546f0a651dc8cf12d5c/C/d9c1c0b9f2054749b066ec4aefa4f7ca to hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/archive/data/default/TestAcidGuarantees/f14d455ff3b60546f0a651dc8cf12d5c/C/d9c1c0b9f2054749b066ec4aefa4f7ca 2024-12-16T17:58:54,607 DEBUG [RS_CLOSE_REGION-regionserver/3609ad07831c:0-0 {event_type=M_RS_CLOSE_REGION, pid=141}] wal.WALSplitUtil(409): Wrote 
file=hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/f14d455ff3b60546f0a651dc8cf12d5c/recovered.edits/433.seqid, newMaxSeqId=433, maxSeqId=1 2024-12-16T17:58:54,608 INFO [RS_CLOSE_REGION-regionserver/3609ad07831c:0-0 {event_type=M_RS_CLOSE_REGION, pid=141}] regionserver.HRegion(1922): Closed TestAcidGuarantees,,1734371907068.f14d455ff3b60546f0a651dc8cf12d5c. 2024-12-16T17:58:54,608 DEBUG [RS_CLOSE_REGION-regionserver/3609ad07831c:0-0 {event_type=M_RS_CLOSE_REGION, pid=141}] regionserver.HRegion(1635): Region close journal for f14d455ff3b60546f0a651dc8cf12d5c: 2024-12-16T17:58:54,609 INFO [RS_CLOSE_REGION-regionserver/3609ad07831c:0-0 {event_type=M_RS_CLOSE_REGION, pid=141}] handler.UnassignRegionHandler(170): Closed f14d455ff3b60546f0a651dc8cf12d5c 2024-12-16T17:58:54,609 INFO [PEWorker-5 {}] assignment.RegionStateStore(202): pid=140 updating hbase:meta row=f14d455ff3b60546f0a651dc8cf12d5c, regionState=CLOSED 2024-12-16T17:58:54,610 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=141, resume processing ppid=140 2024-12-16T17:58:54,610 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=141, ppid=140, state=SUCCESS; CloseRegionProcedure f14d455ff3b60546f0a651dc8cf12d5c, server=3609ad07831c,39733,1734371789085 in 1.4510 sec 2024-12-16T17:58:54,611 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=140, resume processing ppid=139 2024-12-16T17:58:54,611 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=140, ppid=139, state=SUCCESS; TransitRegionStateProcedure table=TestAcidGuarantees, region=f14d455ff3b60546f0a651dc8cf12d5c, UNASSIGN in 1.4550 sec 2024-12-16T17:58:54,612 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=139, resume processing ppid=138 2024-12-16T17:58:54,612 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=139, ppid=138, state=SUCCESS; CloseTableRegionsProcedure table=TestAcidGuarantees in 1.4580 sec 2024-12-16T17:58:54,613 DEBUG [PEWorker-4 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":1,"row":"TestAcidGuarantees","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1734371934613"}]},"ts":"1734371934613"} 2024-12-16T17:58:54,614 INFO [PEWorker-4 {}] hbase.MetaTableAccessor(1655): Updated tableName=TestAcidGuarantees, state=DISABLED in hbase:meta 2024-12-16T17:58:54,652 INFO [PEWorker-4 {}] procedure.DisableTableProcedure(296): Set TestAcidGuarantees to state=DISABLED 2024-12-16T17:58:54,654 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=138, state=SUCCESS; DisableTableProcedure table=TestAcidGuarantees in 1.5580 sec 2024-12-16T17:58:55,206 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38367 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=138 2024-12-16T17:58:55,206 INFO [Time-limited test {}] client.HBaseAdmin$TableFuture(3751): Operation: DISABLE, Table Name: default:TestAcidGuarantees, procId: 138 completed 2024-12-16T17:58:55,206 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38367 {}] master.HMaster$5(2505): Client=jenkins//172.17.0.2 delete TestAcidGuarantees 2024-12-16T17:58:55,207 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38367 {}] procedure2.ProcedureExecutor(1098): Stored pid=142, state=RUNNABLE:DELETE_TABLE_PRE_OPERATION; DeleteTableProcedure table=TestAcidGuarantees 2024-12-16T17:58:55,208 DEBUG [PEWorker-2 {}] 
procedure.DeleteTableProcedure(103): Waiting for RIT for pid=142, state=RUNNABLE:DELETE_TABLE_PRE_OPERATION, locked=true; DeleteTableProcedure table=TestAcidGuarantees 2024-12-16T17:58:55,208 DEBUG [PEWorker-2 {}] procedure.DeleteTableProcedure(115): Deleting regions from filesystem for pid=142, state=RUNNABLE:DELETE_TABLE_CLEAR_FS_LAYOUT, locked=true; DeleteTableProcedure table=TestAcidGuarantees 2024-12-16T17:58:55,208 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38367 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=142 2024-12-16T17:58:55,210 DEBUG [HFileArchiver-12 {}] backup.HFileArchiver(133): ARCHIVING hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/f14d455ff3b60546f0a651dc8cf12d5c 2024-12-16T17:58:55,211 DEBUG [HFileArchiver-12 {}] backup.HFileArchiver(161): Archiving [FileablePath, hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/f14d455ff3b60546f0a651dc8cf12d5c/A, FileablePath, hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/f14d455ff3b60546f0a651dc8cf12d5c/B, FileablePath, hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/f14d455ff3b60546f0a651dc8cf12d5c/C, FileablePath, hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/f14d455ff3b60546f0a651dc8cf12d5c/recovered.edits] 2024-12-16T17:58:55,215 DEBUG [HFileArchiver-9 {}] backup.HFileArchiver(620): Archived from FileablePath, hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/f14d455ff3b60546f0a651dc8cf12d5c/A/6540b2ac5a794fff9436ded3263dc4a1 to hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/archive/data/default/TestAcidGuarantees/f14d455ff3b60546f0a651dc8cf12d5c/A/6540b2ac5a794fff9436ded3263dc4a1 2024-12-16T17:58:55,215 DEBUG [HFileArchiver-15 {}] backup.HFileArchiver(620): Archived from FileablePath, hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/f14d455ff3b60546f0a651dc8cf12d5c/A/36267f0f50744dfb8ebb94fda220b4a9 to hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/archive/data/default/TestAcidGuarantees/f14d455ff3b60546f0a651dc8cf12d5c/A/36267f0f50744dfb8ebb94fda220b4a9 2024-12-16T17:58:55,215 DEBUG [HFileArchiver-16 {}] backup.HFileArchiver(620): Archived from FileablePath, hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/f14d455ff3b60546f0a651dc8cf12d5c/A/69a187a225104968910d5e39ac9208f5 to hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/archive/data/default/TestAcidGuarantees/f14d455ff3b60546f0a651dc8cf12d5c/A/69a187a225104968910d5e39ac9208f5 2024-12-16T17:58:55,215 DEBUG [HFileArchiver-13 {}] backup.HFileArchiver(620): Archived from FileablePath, hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/f14d455ff3b60546f0a651dc8cf12d5c/A/91179985ec854f1c8624ff12a4cc22b5 to hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/archive/data/default/TestAcidGuarantees/f14d455ff3b60546f0a651dc8cf12d5c/A/91179985ec854f1c8624ff12a4cc22b5 2024-12-16T17:58:55,215 DEBUG [HFileArchiver-10 
{}] backup.HFileArchiver(620): Archived from FileablePath, hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/f14d455ff3b60546f0a651dc8cf12d5c/A/5b8eef3a41c74f758662f5e0557a09a2 to hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/archive/data/default/TestAcidGuarantees/f14d455ff3b60546f0a651dc8cf12d5c/A/5b8eef3a41c74f758662f5e0557a09a2 2024-12-16T17:58:55,218 DEBUG [HFileArchiver-14 {}] backup.HFileArchiver(620): Archived from FileablePath, hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/f14d455ff3b60546f0a651dc8cf12d5c/B/384560b454e24477a19c900241b7f51b to hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/archive/data/default/TestAcidGuarantees/f14d455ff3b60546f0a651dc8cf12d5c/B/384560b454e24477a19c900241b7f51b 2024-12-16T17:58:55,218 DEBUG [HFileArchiver-11 {}] backup.HFileArchiver(620): Archived from FileablePath, hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/f14d455ff3b60546f0a651dc8cf12d5c/B/14b6c9756f264b8490303217555ab7da to hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/archive/data/default/TestAcidGuarantees/f14d455ff3b60546f0a651dc8cf12d5c/B/14b6c9756f264b8490303217555ab7da 2024-12-16T17:58:55,218 DEBUG [HFileArchiver-15 {}] backup.HFileArchiver(620): Archived from FileablePath, hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/f14d455ff3b60546f0a651dc8cf12d5c/B/5fae30e8740a4de2984ab787c1d9aa9c to hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/archive/data/default/TestAcidGuarantees/f14d455ff3b60546f0a651dc8cf12d5c/B/5fae30e8740a4de2984ab787c1d9aa9c 2024-12-16T17:58:55,218 DEBUG [HFileArchiver-16 {}] backup.HFileArchiver(620): Archived from FileablePath, hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/f14d455ff3b60546f0a651dc8cf12d5c/B/f618ea080fb148849cf32184ea8b45a4 to hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/archive/data/default/TestAcidGuarantees/f14d455ff3b60546f0a651dc8cf12d5c/B/f618ea080fb148849cf32184ea8b45a4 2024-12-16T17:58:55,218 DEBUG [HFileArchiver-9 {}] backup.HFileArchiver(620): Archived from FileablePath, hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/f14d455ff3b60546f0a651dc8cf12d5c/B/c268e10bf98d436dbecc3ab64df68b23 to hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/archive/data/default/TestAcidGuarantees/f14d455ff3b60546f0a651dc8cf12d5c/B/c268e10bf98d436dbecc3ab64df68b23 2024-12-16T17:58:55,221 DEBUG [HFileArchiver-10 {}] backup.HFileArchiver(620): Archived from FileablePath, hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/f14d455ff3b60546f0a651dc8cf12d5c/C/4b20c4a874054d668c5cdae9bf72dc41 to hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/archive/data/default/TestAcidGuarantees/f14d455ff3b60546f0a651dc8cf12d5c/C/4b20c4a874054d668c5cdae9bf72dc41 2024-12-16T17:58:55,221 DEBUG [HFileArchiver-14 {}] backup.HFileArchiver(620): Archived from FileablePath, 
hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/f14d455ff3b60546f0a651dc8cf12d5c/C/6aafd4b922c5430ca2542d822d2e58b2 to hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/archive/data/default/TestAcidGuarantees/f14d455ff3b60546f0a651dc8cf12d5c/C/6aafd4b922c5430ca2542d822d2e58b2 2024-12-16T17:58:55,221 DEBUG [HFileArchiver-11 {}] backup.HFileArchiver(620): Archived from FileablePath, hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/f14d455ff3b60546f0a651dc8cf12d5c/C/730a9ba4e35743ae928c770538f093ab to hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/archive/data/default/TestAcidGuarantees/f14d455ff3b60546f0a651dc8cf12d5c/C/730a9ba4e35743ae928c770538f093ab 2024-12-16T17:58:55,221 DEBUG [HFileArchiver-13 {}] backup.HFileArchiver(620): Archived from FileablePath, hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/f14d455ff3b60546f0a651dc8cf12d5c/C/11724e4741ce4e5b8ca29a69cccc54cf to hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/archive/data/default/TestAcidGuarantees/f14d455ff3b60546f0a651dc8cf12d5c/C/11724e4741ce4e5b8ca29a69cccc54cf 2024-12-16T17:58:55,221 DEBUG [HFileArchiver-15 {}] backup.HFileArchiver(620): Archived from FileablePath, hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/f14d455ff3b60546f0a651dc8cf12d5c/C/becbe0a2d6ad4434816241354fd68344 to hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/archive/data/default/TestAcidGuarantees/f14d455ff3b60546f0a651dc8cf12d5c/C/becbe0a2d6ad4434816241354fd68344 2024-12-16T17:58:55,224 DEBUG [HFileArchiver-16 {}] backup.HFileArchiver(620): Archived from FileablePath, hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/f14d455ff3b60546f0a651dc8cf12d5c/recovered.edits/433.seqid to hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/archive/data/default/TestAcidGuarantees/f14d455ff3b60546f0a651dc8cf12d5c/recovered.edits/433.seqid 2024-12-16T17:58:55,224 DEBUG [HFileArchiver-12 {}] backup.HFileArchiver(634): Deleted hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/f14d455ff3b60546f0a651dc8cf12d5c 2024-12-16T17:58:55,225 DEBUG [PEWorker-2 {}] procedure.DeleteTableProcedure(313): Archived TestAcidGuarantees regions 2024-12-16T17:58:55,226 DEBUG [PEWorker-2 {}] procedure.DeleteTableProcedure(120): Deleting regions from META for pid=142, state=RUNNABLE:DELETE_TABLE_REMOVE_FROM_META, locked=true; DeleteTableProcedure table=TestAcidGuarantees 2024-12-16T17:58:55,227 WARN [PEWorker-2 {}] procedure.DeleteTableProcedure(371): Deleting some vestigial 1 rows of TestAcidGuarantees from hbase:meta 2024-12-16T17:58:55,229 DEBUG [PEWorker-2 {}] procedure.DeleteTableProcedure(408): Removing 'TestAcidGuarantees' descriptor. 2024-12-16T17:58:55,230 DEBUG [PEWorker-2 {}] procedure.DeleteTableProcedure(126): Deleting assignment state for pid=142, state=RUNNABLE:DELETE_TABLE_UNASSIGN_REGIONS, locked=true; DeleteTableProcedure table=TestAcidGuarantees 2024-12-16T17:58:55,230 DEBUG [PEWorker-2 {}] procedure.DeleteTableProcedure(398): Removing 'TestAcidGuarantees' from region states. 
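The HFileArchiver entries above all follow the same mapping: nothing is deleted in place; each store file under the cluster root's data/ tree is moved to the mirrored location under archive/data/. A minimal sketch of that mapping (hypothetical helper for illustration, not HBase's own HFileArchiver code):

public final class ArchivePathSketch {
  // Given the cluster root dir and a store file path under <root>/data/...,
  // return the mirrored location under <root>/archive/data/... that the log lines show.
  static String toArchivePath(String rootDir, String storeFilePath) {
    String dataPrefix = rootDir + "/data/";
    if (!storeFilePath.startsWith(dataPrefix)) {
      throw new IllegalArgumentException("not under " + dataPrefix + ": " + storeFilePath);
    }
    // e.g. .../data/default/TestAcidGuarantees/<region>/A/<hfile>
    //  ->  .../archive/data/default/TestAcidGuarantees/<region>/A/<hfile>
    return rootDir + "/archive/data/" + storeFilePath.substring(dataPrefix.length());
  }
}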
2024-12-16T17:58:55,230 DEBUG [PEWorker-2 {}] hbase.MetaTableAccessor(2113): Delete {"totalColumns":1,"row":"TestAcidGuarantees,,1734371907068.f14d455ff3b60546f0a651dc8cf12d5c.","families":{"info":[{"qualifier":"","vlen":0,"tag":[],"timestamp":"1734371935230"}]},"ts":"9223372036854775807"} 2024-12-16T17:58:55,234 INFO [PEWorker-2 {}] hbase.MetaTableAccessor(1808): Deleted 1 regions from META 2024-12-16T17:58:55,234 DEBUG [PEWorker-2 {}] hbase.MetaTableAccessor(1809): Deleted regions: [{ENCODED => f14d455ff3b60546f0a651dc8cf12d5c, NAME => 'TestAcidGuarantees,,1734371907068.f14d455ff3b60546f0a651dc8cf12d5c.', STARTKEY => '', ENDKEY => ''}] 2024-12-16T17:58:55,234 DEBUG [PEWorker-2 {}] procedure.DeleteTableProcedure(402): Marking 'TestAcidGuarantees' as deleted. 2024-12-16T17:58:55,234 DEBUG [PEWorker-2 {}] hbase.MetaTableAccessor(2113): Delete {"totalColumns":1,"row":"TestAcidGuarantees","families":{"table":[{"qualifier":"state","vlen":0,"tag":[],"timestamp":"1734371935234"}]},"ts":"9223372036854775807"} 2024-12-16T17:58:55,236 INFO [PEWorker-2 {}] hbase.MetaTableAccessor(1678): Deleted table TestAcidGuarantees state from META 2024-12-16T17:58:55,269 DEBUG [PEWorker-2 {}] procedure.DeleteTableProcedure(133): Finished pid=142, state=RUNNABLE:DELETE_TABLE_POST_OPERATION, locked=true; DeleteTableProcedure table=TestAcidGuarantees 2024-12-16T17:58:55,269 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=142, state=SUCCESS; DeleteTableProcedure table=TestAcidGuarantees in 63 msec 2024-12-16T17:58:55,309 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38367 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=142 2024-12-16T17:58:55,309 INFO [Time-limited test {}] client.HBaseAdmin$TableFuture(3751): Operation: DELETE, Table Name: default:TestAcidGuarantees, procId: 142 completed 2024-12-16T17:58:55,321 INFO [Time-limited test {}] hbase.ResourceChecker(175): after: TestAcidGuaranteesWithAdaptivePolicy#testScanAtomicity Thread=244 (was 247), OpenFileDescriptor=445 (was 459), MaxFileDescriptor=1048576 (was 1048576), SystemLoadAverage=409 (was 453), ProcessCount=11 (was 11), AvailableMemoryMB=3092 (was 3099) 2024-12-16T17:58:55,329 INFO [Time-limited test {}] hbase.ResourceChecker(147): before: TestAcidGuaranteesWithAdaptivePolicy#testMobGetAtomicity Thread=244, OpenFileDescriptor=445, MaxFileDescriptor=1048576, SystemLoadAverage=409, ProcessCount=11, AvailableMemoryMB=3094 2024-12-16T17:58:55,330 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38367 {}] util.TableDescriptorChecker(321): MEMSTORE_FLUSHSIZE for table descriptor or "hbase.hregion.memstore.flush.size" (131072) is too small, which might cause very frequent flushing. 
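The DISABLE (procId 138) and DELETE (procId 142) operations recorded above are driven by two client-side Admin calls. A minimal sketch of the equivalent sequence, with configuration and connection setup assumed rather than taken from this log:

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

public class DropTableSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    try (Connection conn = ConnectionFactory.createConnection(conf);
         Admin admin = conn.getAdmin()) {
      TableName table = TableName.valueOf("TestAcidGuarantees");
      admin.disableTable(table);  // master runs DisableTableProcedure; regions are closed and unassigned
      admin.deleteTable(table);   // master runs DeleteTableProcedure; region dirs are archived, meta rows removed
    }
  }
}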
2024-12-16T17:58:55,330 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38367 {}] master.HMaster$4(2389): Client=jenkins//172.17.0.2 create 'TestAcidGuarantees', {TABLE_ATTRIBUTES => {METADATA => {'hbase.hregion.compacting.memstore.type' => 'ADAPTIVE'}}}, {NAME => 'A', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'B', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'C', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-12-16T17:58:55,331 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38367 {}] procedure2.ProcedureExecutor(1098): Stored pid=143, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION; CreateTableProcedure table=TestAcidGuarantees 2024-12-16T17:58:55,331 INFO [PEWorker-5 {}] procedure.CreateTableProcedure(89): pid=143, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, locked=true; CreateTableProcedure table=TestAcidGuarantees execute state=CREATE_TABLE_PRE_OPERATION 2024-12-16T17:58:55,332 DEBUG [PEWorker-5 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-16T17:58:55,332 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38367 {}] master.MasterRpcServices(713): Client=jenkins//172.17.0.2 procedure request for creating table: namespace: "default" qualifier: "TestAcidGuarantees" procId is: 143 2024-12-16T17:58:55,332 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38367 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=143 2024-12-16T17:58:55,332 INFO [PEWorker-5 {}] procedure.CreateTableProcedure(89): pid=143, state=RUNNABLE:CREATE_TABLE_WRITE_FS_LAYOUT, locked=true; CreateTableProcedure table=TestAcidGuarantees execute state=CREATE_TABLE_WRITE_FS_LAYOUT 2024-12-16T17:58:55,341 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41817 is added to blk_1073742370_1546 (size=963) 2024-12-16T17:58:55,433 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38367 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=143 2024-12-16T17:58:55,634 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38367 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=143 2024-12-16T17:58:55,747 INFO [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(7106): creating {ENCODED => 7be8b4dcb0e9f81dbd68149495fe8709, NAME => 'TestAcidGuarantees,,1734371935330.7be8b4dcb0e9f81dbd68149495fe8709.', STARTKEY => '', ENDKEY => ''}, tableDescriptor='TestAcidGuarantees', {TABLE_ATTRIBUTES => {METADATA => {'hbase.hregion.compacting.memstore.type' => 'ADAPTIVE', 'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'A', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => 
'1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'B', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'C', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, regionDir=hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4 2024-12-16T17:58:55,755 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41817 is added to blk_1073742371_1547 (size=53) 2024-12-16T17:58:55,936 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38367 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=143 2024-12-16T17:58:56,157 DEBUG [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(894): Instantiated TestAcidGuarantees,,1734371935330.7be8b4dcb0e9f81dbd68149495fe8709.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-16T17:58:56,158 DEBUG [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(1681): Closing 7be8b4dcb0e9f81dbd68149495fe8709, disabling compactions & flushes 2024-12-16T17:58:56,158 INFO [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(1703): Closing region TestAcidGuarantees,,1734371935330.7be8b4dcb0e9f81dbd68149495fe8709. 2024-12-16T17:58:56,158 DEBUG [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(1724): Waiting without time limit for close lock on TestAcidGuarantees,,1734371935330.7be8b4dcb0e9f81dbd68149495fe8709. 2024-12-16T17:58:56,158 DEBUG [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(1791): Acquired close lock on TestAcidGuarantees,,1734371935330.7be8b4dcb0e9f81dbd68149495fe8709. after waiting 0 ms 2024-12-16T17:58:56,158 DEBUG [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(1801): Updates disabled for region TestAcidGuarantees,,1734371935330.7be8b4dcb0e9f81dbd68149495fe8709. 2024-12-16T17:58:56,158 INFO [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(1922): Closed TestAcidGuarantees,,1734371935330.7be8b4dcb0e9f81dbd68149495fe8709. 
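The descriptor in the create request above (table-level metadata 'hbase.hregion.compacting.memstore.type' => 'ADAPTIVE', families A/B/C with VERSIONS => '1' and otherwise default settings) corresponds to a client-side createTable call along these lines. A minimal sketch, with configuration and connection setup assumed and the class name hypothetical:

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.TableDescriptorBuilder;

public class CreateAcidTableSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    try (Connection conn = ConnectionFactory.createConnection(conf);
         Admin admin = conn.getAdmin()) {
      TableDescriptorBuilder builder = TableDescriptorBuilder
          .newBuilder(TableName.valueOf("TestAcidGuarantees"))
          // Table-level metadata seen in the log; selects the adaptive in-memory compaction
          // policy that the CompactingMemStore lines below report for each store.
          .setValue("hbase.hregion.compacting.memstore.type", "ADAPTIVE");
      for (String family : new String[] {"A", "B", "C"}) {
        // Family defaults match the logged settings (VERSIONS => '1', BLOOMFILTER => 'ROW', 64 KB blocks).
        builder.setColumnFamily(ColumnFamilyDescriptorBuilder.of(family));
      }
      admin.createTable(builder.build());  // master runs CreateTableProcedure (CREATE_TABLE_* states above)
    }
  }
}

The same policy can also be requested per column family via ColumnFamilyDescriptorBuilder.setInMemoryCompaction(MemoryCompactionPolicy.ADAPTIVE); this log shows the table-level metadata route.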
2024-12-16T17:58:56,158 DEBUG [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(1635): Region close journal for 7be8b4dcb0e9f81dbd68149495fe8709: 2024-12-16T17:58:56,161 INFO [PEWorker-5 {}] procedure.CreateTableProcedure(89): pid=143, state=RUNNABLE:CREATE_TABLE_ADD_TO_META, locked=true; CreateTableProcedure table=TestAcidGuarantees execute state=CREATE_TABLE_ADD_TO_META 2024-12-16T17:58:56,161 DEBUG [PEWorker-5 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":2,"row":"TestAcidGuarantees,,1734371935330.7be8b4dcb0e9f81dbd68149495fe8709.","families":{"info":[{"qualifier":"regioninfo","vlen":52,"tag":[],"timestamp":"1734371936161"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1734371936161"}]},"ts":"1734371936161"} 2024-12-16T17:58:56,163 INFO [PEWorker-5 {}] hbase.MetaTableAccessor(1516): Added 1 regions to meta. 2024-12-16T17:58:56,164 INFO [PEWorker-5 {}] procedure.CreateTableProcedure(89): pid=143, state=RUNNABLE:CREATE_TABLE_ASSIGN_REGIONS, locked=true; CreateTableProcedure table=TestAcidGuarantees execute state=CREATE_TABLE_ASSIGN_REGIONS 2024-12-16T17:58:56,164 DEBUG [PEWorker-5 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":1,"row":"TestAcidGuarantees","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1734371936164"}]},"ts":"1734371936164"} 2024-12-16T17:58:56,165 INFO [PEWorker-5 {}] hbase.MetaTableAccessor(1655): Updated tableName=TestAcidGuarantees, state=ENABLING in hbase:meta 2024-12-16T17:58:56,236 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=144, ppid=143, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE; TransitRegionStateProcedure table=TestAcidGuarantees, region=7be8b4dcb0e9f81dbd68149495fe8709, ASSIGN}] 2024-12-16T17:58:56,238 INFO [PEWorker-1 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=144, ppid=143, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE; TransitRegionStateProcedure table=TestAcidGuarantees, region=7be8b4dcb0e9f81dbd68149495fe8709, ASSIGN 2024-12-16T17:58:56,239 INFO [PEWorker-1 {}] assignment.TransitRegionStateProcedure(264): Starting pid=144, ppid=143, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, locked=true; TransitRegionStateProcedure table=TestAcidGuarantees, region=7be8b4dcb0e9f81dbd68149495fe8709, ASSIGN; state=OFFLINE, location=3609ad07831c,39733,1734371789085; forceNewPlan=false, retain=false 2024-12-16T17:58:56,390 INFO [PEWorker-3 {}] assignment.RegionStateStore(202): pid=144 updating hbase:meta row=7be8b4dcb0e9f81dbd68149495fe8709, regionState=OPENING, regionLocation=3609ad07831c,39733,1734371789085 2024-12-16T17:58:56,393 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=145, ppid=144, state=RUNNABLE; OpenRegionProcedure 7be8b4dcb0e9f81dbd68149495fe8709, server=3609ad07831c,39733,1734371789085}] 2024-12-16T17:58:56,438 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38367 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=143 2024-12-16T17:58:56,547 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 3609ad07831c,39733,1734371789085 2024-12-16T17:58:56,553 INFO [RS_OPEN_REGION-regionserver/3609ad07831c:0-0 {event_type=M_RS_OPEN_REGION, pid=145}] handler.AssignRegionHandler(135): Open TestAcidGuarantees,,1734371935330.7be8b4dcb0e9f81dbd68149495fe8709. 
2024-12-16T17:58:56,553 DEBUG [RS_OPEN_REGION-regionserver/3609ad07831c:0-0 {event_type=M_RS_OPEN_REGION, pid=145}] regionserver.HRegion(7285): Opening region: {ENCODED => 7be8b4dcb0e9f81dbd68149495fe8709, NAME => 'TestAcidGuarantees,,1734371935330.7be8b4dcb0e9f81dbd68149495fe8709.', STARTKEY => '', ENDKEY => ''} 2024-12-16T17:58:56,553 DEBUG [RS_OPEN_REGION-regionserver/3609ad07831c:0-0 {event_type=M_RS_OPEN_REGION, pid=145}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table TestAcidGuarantees 7be8b4dcb0e9f81dbd68149495fe8709 2024-12-16T17:58:56,553 DEBUG [RS_OPEN_REGION-regionserver/3609ad07831c:0-0 {event_type=M_RS_OPEN_REGION, pid=145}] regionserver.HRegion(894): Instantiated TestAcidGuarantees,,1734371935330.7be8b4dcb0e9f81dbd68149495fe8709.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-16T17:58:56,553 DEBUG [RS_OPEN_REGION-regionserver/3609ad07831c:0-0 {event_type=M_RS_OPEN_REGION, pid=145}] regionserver.HRegion(7327): checking encryption for 7be8b4dcb0e9f81dbd68149495fe8709 2024-12-16T17:58:56,553 DEBUG [RS_OPEN_REGION-regionserver/3609ad07831c:0-0 {event_type=M_RS_OPEN_REGION, pid=145}] regionserver.HRegion(7330): checking classloading for 7be8b4dcb0e9f81dbd68149495fe8709 2024-12-16T17:58:56,555 INFO [StoreOpener-7be8b4dcb0e9f81dbd68149495fe8709-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family A of region 7be8b4dcb0e9f81dbd68149495fe8709 2024-12-16T17:58:56,556 INFO [StoreOpener-7be8b4dcb0e9f81dbd68149495fe8709-1 {}] regionserver.CompactingMemStore(122): Store=A, in-memory flush size threshold=2.00 MB, immutable segments index type=CHUNK_MAP, compactor=ADAPTIVE, pipelineThreshold=2, compactionCellMax=10 2024-12-16T17:58:56,556 INFO [StoreOpener-7be8b4dcb0e9f81dbd68149495fe8709-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 7be8b4dcb0e9f81dbd68149495fe8709 columnFamilyName A 2024-12-16T17:58:56,556 DEBUG [StoreOpener-7be8b4dcb0e9f81dbd68149495fe8709-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-16T17:58:56,557 INFO [StoreOpener-7be8b4dcb0e9f81dbd68149495fe8709-1 {}] regionserver.HStore(327): Store=7be8b4dcb0e9f81dbd68149495fe8709/A, memstore type=CompactingMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-16T17:58:56,557 INFO [StoreOpener-7be8b4dcb0e9f81dbd68149495fe8709-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, 
cacheDataCompressed=false, prefetchOnOpen=false, for column family B of region 7be8b4dcb0e9f81dbd68149495fe8709 2024-12-16T17:58:56,558 INFO [StoreOpener-7be8b4dcb0e9f81dbd68149495fe8709-1 {}] regionserver.CompactingMemStore(122): Store=B, in-memory flush size threshold=2.00 MB, immutable segments index type=CHUNK_MAP, compactor=ADAPTIVE, pipelineThreshold=2, compactionCellMax=10 2024-12-16T17:58:56,558 INFO [StoreOpener-7be8b4dcb0e9f81dbd68149495fe8709-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 7be8b4dcb0e9f81dbd68149495fe8709 columnFamilyName B 2024-12-16T17:58:56,558 DEBUG [StoreOpener-7be8b4dcb0e9f81dbd68149495fe8709-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-16T17:58:56,559 INFO [StoreOpener-7be8b4dcb0e9f81dbd68149495fe8709-1 {}] regionserver.HStore(327): Store=7be8b4dcb0e9f81dbd68149495fe8709/B, memstore type=CompactingMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-16T17:58:56,559 INFO [StoreOpener-7be8b4dcb0e9f81dbd68149495fe8709-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family C of region 7be8b4dcb0e9f81dbd68149495fe8709 2024-12-16T17:58:56,560 INFO [StoreOpener-7be8b4dcb0e9f81dbd68149495fe8709-1 {}] regionserver.CompactingMemStore(122): Store=C, in-memory flush size threshold=2.00 MB, immutable segments index type=CHUNK_MAP, compactor=ADAPTIVE, pipelineThreshold=2, compactionCellMax=10 2024-12-16T17:58:56,561 INFO [StoreOpener-7be8b4dcb0e9f81dbd68149495fe8709-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 7be8b4dcb0e9f81dbd68149495fe8709 columnFamilyName C 2024-12-16T17:58:56,561 DEBUG [StoreOpener-7be8b4dcb0e9f81dbd68149495fe8709-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-16T17:58:56,561 INFO [StoreOpener-7be8b4dcb0e9f81dbd68149495fe8709-1 {}] regionserver.HStore(327): Store=7be8b4dcb0e9f81dbd68149495fe8709/C, memstore 
type=CompactingMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-16T17:58:56,561 INFO [RS_OPEN_REGION-regionserver/3609ad07831c:0-0 {event_type=M_RS_OPEN_REGION, pid=145}] regionserver.HRegion(1178): Setting FlushNonSloppyStoresFirstPolicy for the region=TestAcidGuarantees,,1734371935330.7be8b4dcb0e9f81dbd68149495fe8709. 2024-12-16T17:58:56,562 DEBUG [RS_OPEN_REGION-regionserver/3609ad07831c:0-0 {event_type=M_RS_OPEN_REGION, pid=145}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/7be8b4dcb0e9f81dbd68149495fe8709 2024-12-16T17:58:56,563 DEBUG [RS_OPEN_REGION-regionserver/3609ad07831c:0-0 {event_type=M_RS_OPEN_REGION, pid=145}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/7be8b4dcb0e9f81dbd68149495fe8709 2024-12-16T17:58:56,564 DEBUG [RS_OPEN_REGION-regionserver/3609ad07831c:0-0 {event_type=M_RS_OPEN_REGION, pid=145}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table TestAcidGuarantees descriptor;using region.getMemStoreFlushHeapSize/# of families (16.0 M)) instead. 2024-12-16T17:58:56,566 DEBUG [RS_OPEN_REGION-regionserver/3609ad07831c:0-0 {event_type=M_RS_OPEN_REGION, pid=145}] regionserver.HRegion(1085): writing seq id for 7be8b4dcb0e9f81dbd68149495fe8709 2024-12-16T17:58:56,569 DEBUG [RS_OPEN_REGION-regionserver/3609ad07831c:0-0 {event_type=M_RS_OPEN_REGION, pid=145}] wal.WALSplitUtil(409): Wrote file=hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/7be8b4dcb0e9f81dbd68149495fe8709/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-12-16T17:58:56,569 INFO [RS_OPEN_REGION-regionserver/3609ad07831c:0-0 {event_type=M_RS_OPEN_REGION, pid=145}] regionserver.HRegion(1102): Opened 7be8b4dcb0e9f81dbd68149495fe8709; next sequenceid=2; ConstantSizeRegionSplitPolicy{desiredMaxFileSize=67159944, jitterRate=7.611513137817383E-4}, FlushLargeStoresPolicy{flushSizeLowerBound=16777216} 2024-12-16T17:58:56,570 DEBUG [RS_OPEN_REGION-regionserver/3609ad07831c:0-0 {event_type=M_RS_OPEN_REGION, pid=145}] regionserver.HRegion(1001): Region open journal for 7be8b4dcb0e9f81dbd68149495fe8709: 2024-12-16T17:58:56,571 INFO [RS_OPEN_REGION-regionserver/3609ad07831c:0-0 {event_type=M_RS_OPEN_REGION, pid=145}] regionserver.HRegionServer(2601): Post open deploy tasks for TestAcidGuarantees,,1734371935330.7be8b4dcb0e9f81dbd68149495fe8709., pid=145, masterSystemTime=1734371936547 2024-12-16T17:58:56,572 DEBUG [RS_OPEN_REGION-regionserver/3609ad07831c:0-0 {event_type=M_RS_OPEN_REGION, pid=145}] regionserver.HRegionServer(2628): Finished post open deploy task for TestAcidGuarantees,,1734371935330.7be8b4dcb0e9f81dbd68149495fe8709. 2024-12-16T17:58:56,572 INFO [RS_OPEN_REGION-regionserver/3609ad07831c:0-0 {event_type=M_RS_OPEN_REGION, pid=145}] handler.AssignRegionHandler(164): Opened TestAcidGuarantees,,1734371935330.7be8b4dcb0e9f81dbd68149495fe8709. 
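
One detail worth decoding on the open line above: ConstantSizeRegionSplitPolicy reports desiredMaxFileSize=67159944 together with jitterRate=7.611513137817383E-4, which is consistent with a 64 MB (67108864 byte) base file size plus a small random jitter. A quick check of that arithmetic, assuming desiredMaxFileSize = maxFileSize * (1 + jitterRate); the 64 MB base is inferred from the numbers, not read from the test configuration:

// Illustration only: relating the values printed on the ConstantSizeRegionSplitPolicy log line.
public class SplitPolicyJitterCheck {
  public static void main(String[] args) {
    long maxFileSize = 64L * 1024 * 1024;      // 67108864 bytes, assumed base max file size
    double jitterRate = 7.611513137817383E-4;  // value printed at 17:58:56,569
    long desired = maxFileSize + (long) (maxFileSize * jitterRate);
    System.out.println(desired);               // prints 67159944, matching the log
  }
}
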
2024-12-16T17:58:56,572 INFO [PEWorker-2 {}] assignment.RegionStateStore(202): pid=144 updating hbase:meta row=7be8b4dcb0e9f81dbd68149495fe8709, regionState=OPEN, openSeqNum=2, regionLocation=3609ad07831c,39733,1734371789085 2024-12-16T17:58:56,574 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=145, resume processing ppid=144 2024-12-16T17:58:56,574 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=145, ppid=144, state=SUCCESS; OpenRegionProcedure 7be8b4dcb0e9f81dbd68149495fe8709, server=3609ad07831c,39733,1734371789085 in 180 msec 2024-12-16T17:58:56,575 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=144, resume processing ppid=143 2024-12-16T17:58:56,575 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=144, ppid=143, state=SUCCESS; TransitRegionStateProcedure table=TestAcidGuarantees, region=7be8b4dcb0e9f81dbd68149495fe8709, ASSIGN in 338 msec 2024-12-16T17:58:56,575 INFO [PEWorker-1 {}] procedure.CreateTableProcedure(89): pid=143, state=RUNNABLE:CREATE_TABLE_UPDATE_DESC_CACHE, locked=true; CreateTableProcedure table=TestAcidGuarantees execute state=CREATE_TABLE_UPDATE_DESC_CACHE 2024-12-16T17:58:56,575 DEBUG [PEWorker-1 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":1,"row":"TestAcidGuarantees","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1734371936575"}]},"ts":"1734371936575"} 2024-12-16T17:58:56,576 INFO [PEWorker-1 {}] hbase.MetaTableAccessor(1655): Updated tableName=TestAcidGuarantees, state=ENABLED in hbase:meta 2024-12-16T17:58:56,585 INFO [PEWorker-1 {}] procedure.CreateTableProcedure(89): pid=143, state=RUNNABLE:CREATE_TABLE_POST_OPERATION, locked=true; CreateTableProcedure table=TestAcidGuarantees execute state=CREATE_TABLE_POST_OPERATION 2024-12-16T17:58:56,587 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=143, state=SUCCESS; CreateTableProcedure table=TestAcidGuarantees in 1.2550 sec 2024-12-16T17:58:57,439 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38367 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=143 2024-12-16T17:58:57,440 INFO [Time-limited test {}] client.HBaseAdmin$TableFuture(3751): Operation: CREATE, Table Name: default:TestAcidGuarantees, procId: 143 completed 2024-12-16T17:58:57,441 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x4766354f to 127.0.0.1:49190 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@4d342ad4 2024-12-16T17:58:57,535 DEBUG [FsDatasetAsyncDiskServiceFixer {}] hbase.HBaseTestingUtility$FsDatasetAsyncDiskServiceFixer(620): NoSuchFieldException: threadGroup; It might because your Hadoop version > 3.2.3 or 3.3.4, See HBASE-27595 for details. 
2024-12-16T17:58:57,564 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@1ebd696b, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-16T17:58:57,567 DEBUG [Time-limited test {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-16T17:58:57,569 INFO [RS-EventLoopGroup-3-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:47418, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-16T17:58:57,571 DEBUG [Time-limited test {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=MasterService, sasl=false 2024-12-16T17:58:57,573 INFO [RS-EventLoopGroup-1-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:40258, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=MasterService 2024-12-16T17:58:57,576 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38367 {}] util.TableDescriptorChecker(321): MEMSTORE_FLUSHSIZE for table descriptor or "hbase.hregion.memstore.flush.size" (131072) is too small, which might cause very frequent flushing. 2024-12-16T17:58:57,576 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38367 {}] master.HMaster$14(2798): Client=jenkins//172.17.0.2 modify table TestAcidGuarantees from 'TestAcidGuarantees', {TABLE_ATTRIBUTES => {METADATA => {'hbase.hregion.compacting.memstore.type' => 'ADAPTIVE', 'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'A', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'B', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'C', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} to 'TestAcidGuarantees', {TABLE_ATTRIBUTES => {METADATA => {'hbase.hregion.compacting.memstore.type' => 'ADAPTIVE', 'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'A', INDEX_BLOCK_ENCODING => 'NONE', MOB_THRESHOLD => '4', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', IS_MOB => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'B', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'C', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', 
REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-12-16T17:58:57,578 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38367 {}] procedure2.ProcedureExecutor(1098): Stored pid=146, state=RUNNABLE:MODIFY_TABLE_PREPARE; ModifyTableProcedure table=TestAcidGuarantees 2024-12-16T17:58:57,589 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41817 is added to blk_1073742372_1548 (size=999) 2024-12-16T17:58:57,994 DEBUG [PEWorker-4 {}] util.FSTableDescriptors(519): Deleted hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/.tabledesc/.tableinfo.0000000001.963 2024-12-16T17:58:57,994 INFO [PEWorker-4 {}] util.FSTableDescriptors(297): Updated tableinfo=hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/.tabledesc/.tableinfo.0000000002.999 2024-12-16T17:58:57,998 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=147, ppid=146, state=RUNNABLE:REOPEN_TABLE_REGIONS_GET_REGIONS; ReopenTableRegionsProcedure table=TestAcidGuarantees}] 2024-12-16T17:58:58,001 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=148, ppid=147, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE; TransitRegionStateProcedure table=TestAcidGuarantees, region=7be8b4dcb0e9f81dbd68149495fe8709, REOPEN/MOVE}] 2024-12-16T17:58:58,002 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=148, ppid=147, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE; TransitRegionStateProcedure table=TestAcidGuarantees, region=7be8b4dcb0e9f81dbd68149495fe8709, REOPEN/MOVE 2024-12-16T17:58:58,002 INFO [PEWorker-2 {}] assignment.RegionStateStore(202): pid=148 updating hbase:meta row=7be8b4dcb0e9f81dbd68149495fe8709, regionState=CLOSING, regionLocation=3609ad07831c,39733,1734371789085 2024-12-16T17:58:58,003 DEBUG [PEWorker-2 {}] assignment.TransitRegionStateProcedure(338): Close region: isSplit: false: evictOnSplit: true: evictOnClose: false 2024-12-16T17:58:58,003 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=149, ppid=148, state=RUNNABLE; CloseRegionProcedure 7be8b4dcb0e9f81dbd68149495fe8709, server=3609ad07831c,39733,1734371789085}] 2024-12-16T17:58:58,155 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 3609ad07831c,39733,1734371789085 2024-12-16T17:58:58,156 INFO [RS_CLOSE_REGION-regionserver/3609ad07831c:0-0 {event_type=M_RS_CLOSE_REGION, pid=149}] handler.UnassignRegionHandler(124): Close 7be8b4dcb0e9f81dbd68149495fe8709 2024-12-16T17:58:58,156 DEBUG [RS_CLOSE_REGION-regionserver/3609ad07831c:0-0 {event_type=M_RS_CLOSE_REGION, pid=149}] handler.UnassignRegionHandler(138): Unassign region: split region: false: evictCache: false 2024-12-16T17:58:58,156 DEBUG [RS_CLOSE_REGION-regionserver/3609ad07831c:0-0 {event_type=M_RS_CLOSE_REGION, pid=149}] regionserver.HRegion(1681): Closing 7be8b4dcb0e9f81dbd68149495fe8709, disabling compactions & flushes 2024-12-16T17:58:58,156 INFO [RS_CLOSE_REGION-regionserver/3609ad07831c:0-0 {event_type=M_RS_CLOSE_REGION, pid=149}] regionserver.HRegion(1703): Closing region TestAcidGuarantees,,1734371935330.7be8b4dcb0e9f81dbd68149495fe8709. 
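
The modify request logged at 17:58:57,576 changes only column family A, enabling MOB storage (IS_MOB => 'true', MOB_THRESHOLD => '4') while leaving B and C untouched; the region is then closed and reopened below so the new descriptor takes effect. A minimal sketch of issuing that kind of change through the standard HBase 2.x Admin API, with connection setup and error handling assumed rather than taken from the test:

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptor;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.util.Bytes;

public class EnableMobOnFamilyASketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    TableName table = TableName.valueOf("TestAcidGuarantees");
    try (Connection conn = ConnectionFactory.createConnection(conf);
         Admin admin = conn.getAdmin()) {
      // Start from the current definition of family A and flip only the MOB settings,
      // mirroring the before/after descriptors printed in the log.
      ColumnFamilyDescriptor current =
          admin.getDescriptor(table).getColumnFamily(Bytes.toBytes("A"));
      ColumnFamilyDescriptor updated =
          ColumnFamilyDescriptorBuilder.newBuilder(current)
              .setMobEnabled(true)  // IS_MOB => 'true'
              .setMobThreshold(4)   // MOB_THRESHOLD => '4'
              .build();
      // The master applies this as a table modification and reopens the region,
      // as the surrounding entries show.
      admin.modifyColumnFamily(table, updated);
    }
  }
}

Note also the TableDescriptorChecker warning just before the modify: the table keeps MEMSTORE_FLUSHSIZE at 131072 bytes, which the master flags as small enough to cause very frequent flushing, consistent with the flush and RegionTooBusyException activity later in the log.
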
2024-12-16T17:58:58,156 DEBUG [RS_CLOSE_REGION-regionserver/3609ad07831c:0-0 {event_type=M_RS_CLOSE_REGION, pid=149}] regionserver.HRegion(1724): Waiting without time limit for close lock on TestAcidGuarantees,,1734371935330.7be8b4dcb0e9f81dbd68149495fe8709. 2024-12-16T17:58:58,156 DEBUG [RS_CLOSE_REGION-regionserver/3609ad07831c:0-0 {event_type=M_RS_CLOSE_REGION, pid=149}] regionserver.HRegion(1791): Acquired close lock on TestAcidGuarantees,,1734371935330.7be8b4dcb0e9f81dbd68149495fe8709. after waiting 0 ms 2024-12-16T17:58:58,156 DEBUG [RS_CLOSE_REGION-regionserver/3609ad07831c:0-0 {event_type=M_RS_CLOSE_REGION, pid=149}] regionserver.HRegion(1801): Updates disabled for region TestAcidGuarantees,,1734371935330.7be8b4dcb0e9f81dbd68149495fe8709. 2024-12-16T17:58:58,163 DEBUG [RS_CLOSE_REGION-regionserver/3609ad07831c:0-0 {event_type=M_RS_CLOSE_REGION, pid=149}] wal.WALSplitUtil(409): Wrote file=hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/7be8b4dcb0e9f81dbd68149495fe8709/recovered.edits/4.seqid, newMaxSeqId=4, maxSeqId=1 2024-12-16T17:58:58,165 INFO [RS_CLOSE_REGION-regionserver/3609ad07831c:0-0 {event_type=M_RS_CLOSE_REGION, pid=149}] regionserver.HRegion(1922): Closed TestAcidGuarantees,,1734371935330.7be8b4dcb0e9f81dbd68149495fe8709. 2024-12-16T17:58:58,165 DEBUG [RS_CLOSE_REGION-regionserver/3609ad07831c:0-0 {event_type=M_RS_CLOSE_REGION, pid=149}] regionserver.HRegion(1635): Region close journal for 7be8b4dcb0e9f81dbd68149495fe8709: 2024-12-16T17:58:58,165 WARN [RS_CLOSE_REGION-regionserver/3609ad07831c:0-0 {event_type=M_RS_CLOSE_REGION, pid=149}] regionserver.HRegionServer(3786): Not adding moved region record: 7be8b4dcb0e9f81dbd68149495fe8709 to self. 2024-12-16T17:58:58,167 INFO [RS_CLOSE_REGION-regionserver/3609ad07831c:0-0 {event_type=M_RS_CLOSE_REGION, pid=149}] handler.UnassignRegionHandler(170): Closed 7be8b4dcb0e9f81dbd68149495fe8709 2024-12-16T17:58:58,168 INFO [PEWorker-1 {}] assignment.RegionStateStore(202): pid=148 updating hbase:meta row=7be8b4dcb0e9f81dbd68149495fe8709, regionState=CLOSED 2024-12-16T17:58:58,171 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=149, resume processing ppid=148 2024-12-16T17:58:58,171 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=149, ppid=148, state=SUCCESS; CloseRegionProcedure 7be8b4dcb0e9f81dbd68149495fe8709, server=3609ad07831c,39733,1734371789085 in 166 msec 2024-12-16T17:58:58,172 INFO [PEWorker-4 {}] assignment.TransitRegionStateProcedure(264): Starting pid=148, ppid=147, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, locked=true; TransitRegionStateProcedure table=TestAcidGuarantees, region=7be8b4dcb0e9f81dbd68149495fe8709, REOPEN/MOVE; state=CLOSED, location=3609ad07831c,39733,1734371789085; forceNewPlan=false, retain=true 2024-12-16T17:58:58,323 INFO [PEWorker-3 {}] assignment.RegionStateStore(202): pid=148 updating hbase:meta row=7be8b4dcb0e9f81dbd68149495fe8709, regionState=OPENING, regionLocation=3609ad07831c,39733,1734371789085 2024-12-16T17:58:58,326 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=150, ppid=148, state=RUNNABLE; OpenRegionProcedure 7be8b4dcb0e9f81dbd68149495fe8709, server=3609ad07831c,39733,1734371789085}] 2024-12-16T17:58:58,479 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 3609ad07831c,39733,1734371789085 2024-12-16T17:58:58,486 INFO [RS_OPEN_REGION-regionserver/3609ad07831c:0-0 
{event_type=M_RS_OPEN_REGION, pid=150}] handler.AssignRegionHandler(135): Open TestAcidGuarantees,,1734371935330.7be8b4dcb0e9f81dbd68149495fe8709. 2024-12-16T17:58:58,486 DEBUG [RS_OPEN_REGION-regionserver/3609ad07831c:0-0 {event_type=M_RS_OPEN_REGION, pid=150}] regionserver.HRegion(7285): Opening region: {ENCODED => 7be8b4dcb0e9f81dbd68149495fe8709, NAME => 'TestAcidGuarantees,,1734371935330.7be8b4dcb0e9f81dbd68149495fe8709.', STARTKEY => '', ENDKEY => ''} 2024-12-16T17:58:58,487 DEBUG [RS_OPEN_REGION-regionserver/3609ad07831c:0-0 {event_type=M_RS_OPEN_REGION, pid=150}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table TestAcidGuarantees 7be8b4dcb0e9f81dbd68149495fe8709 2024-12-16T17:58:58,487 DEBUG [RS_OPEN_REGION-regionserver/3609ad07831c:0-0 {event_type=M_RS_OPEN_REGION, pid=150}] regionserver.HRegion(894): Instantiated TestAcidGuarantees,,1734371935330.7be8b4dcb0e9f81dbd68149495fe8709.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-16T17:58:58,487 DEBUG [RS_OPEN_REGION-regionserver/3609ad07831c:0-0 {event_type=M_RS_OPEN_REGION, pid=150}] regionserver.HRegion(7327): checking encryption for 7be8b4dcb0e9f81dbd68149495fe8709 2024-12-16T17:58:58,487 DEBUG [RS_OPEN_REGION-regionserver/3609ad07831c:0-0 {event_type=M_RS_OPEN_REGION, pid=150}] regionserver.HRegion(7330): checking classloading for 7be8b4dcb0e9f81dbd68149495fe8709 2024-12-16T17:58:58,491 INFO [StoreOpener-7be8b4dcb0e9f81dbd68149495fe8709-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family A of region 7be8b4dcb0e9f81dbd68149495fe8709 2024-12-16T17:58:58,492 INFO [StoreOpener-7be8b4dcb0e9f81dbd68149495fe8709-1 {}] regionserver.CompactingMemStore(122): Store=A, in-memory flush size threshold=2.00 MB, immutable segments index type=CHUNK_MAP, compactor=ADAPTIVE, pipelineThreshold=2, compactionCellMax=10 2024-12-16T17:58:58,493 INFO [StoreOpener-7be8b4dcb0e9f81dbd68149495fe8709-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 7be8b4dcb0e9f81dbd68149495fe8709 columnFamilyName A 2024-12-16T17:58:58,494 DEBUG [StoreOpener-7be8b4dcb0e9f81dbd68149495fe8709-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-16T17:58:58,495 INFO [StoreOpener-7be8b4dcb0e9f81dbd68149495fe8709-1 {}] regionserver.HStore(327): Store=7be8b4dcb0e9f81dbd68149495fe8709/A, memstore type=CompactingMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-16T17:58:58,495 INFO [StoreOpener-7be8b4dcb0e9f81dbd68149495fe8709-1 {}] regionserver.HStore(400): Created 
cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family B of region 7be8b4dcb0e9f81dbd68149495fe8709 2024-12-16T17:58:58,496 INFO [StoreOpener-7be8b4dcb0e9f81dbd68149495fe8709-1 {}] regionserver.CompactingMemStore(122): Store=B, in-memory flush size threshold=2.00 MB, immutable segments index type=CHUNK_MAP, compactor=ADAPTIVE, pipelineThreshold=2, compactionCellMax=10 2024-12-16T17:58:58,496 INFO [StoreOpener-7be8b4dcb0e9f81dbd68149495fe8709-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 7be8b4dcb0e9f81dbd68149495fe8709 columnFamilyName B 2024-12-16T17:58:58,496 DEBUG [StoreOpener-7be8b4dcb0e9f81dbd68149495fe8709-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-16T17:58:58,496 INFO [StoreOpener-7be8b4dcb0e9f81dbd68149495fe8709-1 {}] regionserver.HStore(327): Store=7be8b4dcb0e9f81dbd68149495fe8709/B, memstore type=CompactingMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-16T17:58:58,496 INFO [StoreOpener-7be8b4dcb0e9f81dbd68149495fe8709-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family C of region 7be8b4dcb0e9f81dbd68149495fe8709 2024-12-16T17:58:58,497 INFO [StoreOpener-7be8b4dcb0e9f81dbd68149495fe8709-1 {}] regionserver.CompactingMemStore(122): Store=C, in-memory flush size threshold=2.00 MB, immutable segments index type=CHUNK_MAP, compactor=ADAPTIVE, pipelineThreshold=2, compactionCellMax=10 2024-12-16T17:58:58,497 INFO [StoreOpener-7be8b4dcb0e9f81dbd68149495fe8709-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 7be8b4dcb0e9f81dbd68149495fe8709 columnFamilyName C 2024-12-16T17:58:58,497 DEBUG [StoreOpener-7be8b4dcb0e9f81dbd68149495fe8709-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-16T17:58:58,498 INFO 
[StoreOpener-7be8b4dcb0e9f81dbd68149495fe8709-1 {}] regionserver.HStore(327): Store=7be8b4dcb0e9f81dbd68149495fe8709/C, memstore type=CompactingMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-16T17:58:58,498 INFO [RS_OPEN_REGION-regionserver/3609ad07831c:0-0 {event_type=M_RS_OPEN_REGION, pid=150}] regionserver.HRegion(1178): Setting FlushNonSloppyStoresFirstPolicy for the region=TestAcidGuarantees,,1734371935330.7be8b4dcb0e9f81dbd68149495fe8709. 2024-12-16T17:58:58,498 DEBUG [RS_OPEN_REGION-regionserver/3609ad07831c:0-0 {event_type=M_RS_OPEN_REGION, pid=150}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/7be8b4dcb0e9f81dbd68149495fe8709 2024-12-16T17:58:58,499 DEBUG [RS_OPEN_REGION-regionserver/3609ad07831c:0-0 {event_type=M_RS_OPEN_REGION, pid=150}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/7be8b4dcb0e9f81dbd68149495fe8709 2024-12-16T17:58:58,501 DEBUG [RS_OPEN_REGION-regionserver/3609ad07831c:0-0 {event_type=M_RS_OPEN_REGION, pid=150}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table TestAcidGuarantees descriptor;using region.getMemStoreFlushHeapSize/# of families (16.0 M)) instead. 2024-12-16T17:58:58,502 DEBUG [RS_OPEN_REGION-regionserver/3609ad07831c:0-0 {event_type=M_RS_OPEN_REGION, pid=150}] regionserver.HRegion(1085): writing seq id for 7be8b4dcb0e9f81dbd68149495fe8709 2024-12-16T17:58:58,503 INFO [RS_OPEN_REGION-regionserver/3609ad07831c:0-0 {event_type=M_RS_OPEN_REGION, pid=150}] regionserver.HRegion(1102): Opened 7be8b4dcb0e9f81dbd68149495fe8709; next sequenceid=5; ConstantSizeRegionSplitPolicy{desiredMaxFileSize=71025683, jitterRate=0.05836515128612518}, FlushLargeStoresPolicy{flushSizeLowerBound=16777216} 2024-12-16T17:58:58,504 DEBUG [RS_OPEN_REGION-regionserver/3609ad07831c:0-0 {event_type=M_RS_OPEN_REGION, pid=150}] regionserver.HRegion(1001): Region open journal for 7be8b4dcb0e9f81dbd68149495fe8709: 2024-12-16T17:58:58,504 INFO [RS_OPEN_REGION-regionserver/3609ad07831c:0-0 {event_type=M_RS_OPEN_REGION, pid=150}] regionserver.HRegionServer(2601): Post open deploy tasks for TestAcidGuarantees,,1734371935330.7be8b4dcb0e9f81dbd68149495fe8709., pid=150, masterSystemTime=1734371938479 2024-12-16T17:58:58,505 DEBUG [RS_OPEN_REGION-regionserver/3609ad07831c:0-0 {event_type=M_RS_OPEN_REGION, pid=150}] regionserver.HRegionServer(2628): Finished post open deploy task for TestAcidGuarantees,,1734371935330.7be8b4dcb0e9f81dbd68149495fe8709. 2024-12-16T17:58:58,506 INFO [RS_OPEN_REGION-regionserver/3609ad07831c:0-0 {event_type=M_RS_OPEN_REGION, pid=150}] handler.AssignRegionHandler(164): Opened TestAcidGuarantees,,1734371935330.7be8b4dcb0e9f81dbd68149495fe8709. 
2024-12-16T17:58:58,506 INFO [PEWorker-2 {}] assignment.RegionStateStore(202): pid=148 updating hbase:meta row=7be8b4dcb0e9f81dbd68149495fe8709, regionState=OPEN, openSeqNum=5, regionLocation=3609ad07831c,39733,1734371789085 2024-12-16T17:58:58,508 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=150, resume processing ppid=148 2024-12-16T17:58:58,508 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=150, ppid=148, state=SUCCESS; OpenRegionProcedure 7be8b4dcb0e9f81dbd68149495fe8709, server=3609ad07831c,39733,1734371789085 in 181 msec 2024-12-16T17:58:58,509 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=148, resume processing ppid=147 2024-12-16T17:58:58,509 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=148, ppid=147, state=SUCCESS; TransitRegionStateProcedure table=TestAcidGuarantees, region=7be8b4dcb0e9f81dbd68149495fe8709, REOPEN/MOVE in 507 msec 2024-12-16T17:58:58,511 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=147, resume processing ppid=146 2024-12-16T17:58:58,511 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=147, ppid=146, state=SUCCESS; ReopenTableRegionsProcedure table=TestAcidGuarantees in 512 msec 2024-12-16T17:58:58,512 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=146, state=SUCCESS; ModifyTableProcedure table=TestAcidGuarantees in 935 msec 2024-12-16T17:58:58,513 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38367 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=146 2024-12-16T17:58:58,514 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x6c974465 to 127.0.0.1:49190 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@19a1a9 2024-12-16T17:58:58,552 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@759a76f0, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-16T17:58:58,553 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x5710ea10 to 127.0.0.1:49190 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@1a490db2 2024-12-16T17:58:58,560 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@3c42c5fd, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-16T17:58:58,561 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x7c219434 to 127.0.0.1:49190 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@2ed7229f 2024-12-16T17:58:58,569 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@4107795, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-16T17:58:58,569 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x55aec38f 
to 127.0.0.1:49190 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@427ac07f 2024-12-16T17:58:58,577 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@4551ab7a, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-16T17:58:58,578 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x3051aceb to 127.0.0.1:49190 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@243cfba6 2024-12-16T17:58:58,585 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@6cfe9e20, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-16T17:58:58,586 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x043c1ec4 to 127.0.0.1:49190 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@1cab8ffd 2024-12-16T17:58:58,594 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@4963c836, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-16T17:58:58,595 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x1b87baa3 to 127.0.0.1:49190 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@5b930a49 2024-12-16T17:58:58,602 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@ce64b75, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-16T17:58:58,602 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x1f03b85b to 127.0.0.1:49190 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@5c8138b0 2024-12-16T17:58:58,611 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@62826db6, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-16T17:58:58,611 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x63ce3f67 to 127.0.0.1:49190 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@36dfcf16 2024-12-16T17:58:58,619 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@47c1fba6, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-16T17:58:58,619 DEBUG [Time-limited test 
{}] zookeeper.ReadOnlyZKClient(149): Connect 0x58752c73 to 127.0.0.1:49190 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@101aef14 2024-12-16T17:58:58,627 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@58ce3a7a, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-16T17:58:58,631 DEBUG [hconnection-0x4fde60b6-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-16T17:58:58,631 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38367 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-12-16T17:58:58,632 DEBUG [hconnection-0x64bdcbb0-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-16T17:58:58,633 INFO [RS-EventLoopGroup-3-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:47424, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-16T17:58:58,633 INFO [RS-EventLoopGroup-3-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:47440, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-16T17:58:58,634 DEBUG [hconnection-0x3c6381c6-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-16T17:58:58,634 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38367 {}] procedure2.ProcedureExecutor(1098): Stored pid=151, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=151, table=TestAcidGuarantees 2024-12-16T17:58:58,634 DEBUG [hconnection-0x499a169-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-16T17:58:58,635 DEBUG [hconnection-0x2c04a4bc-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-16T17:58:58,635 DEBUG [hconnection-0x2eededd-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-16T17:58:58,635 INFO [PEWorker-3 {}] procedure.FlushTableProcedure(91): pid=151, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=151, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-12-16T17:58:58,635 INFO [RS-EventLoopGroup-3-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:47446, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-16T17:58:58,636 DEBUG [hconnection-0x6d00c2f8-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-16T17:58:58,636 INFO [RS-EventLoopGroup-3-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:47450, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-16T17:58:58,636 INFO [RS-EventLoopGroup-3-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:47466, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-16T17:58:58,636 INFO 
[RS-EventLoopGroup-3-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:47476, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-16T17:58:58,636 INFO [PEWorker-3 {}] procedure.FlushTableProcedure(91): pid=151, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=151, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-12-16T17:58:58,636 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=152, ppid=151, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-12-16T17:58:58,636 DEBUG [hconnection-0x19d080d7-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-16T17:58:58,636 DEBUG [hconnection-0x125409cc-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-16T17:58:58,637 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38367 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=151 2024-12-16T17:58:58,637 DEBUG [hconnection-0x1467f15-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-16T17:58:58,637 INFO [RS-EventLoopGroup-3-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:47486, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-16T17:58:58,637 INFO [RS-EventLoopGroup-3-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:47480, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-16T17:58:58,637 INFO [RS-EventLoopGroup-3-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:47488, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-16T17:58:58,638 INFO [RS-EventLoopGroup-3-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:47500, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-16T17:58:58,642 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39733 {}] regionserver.HRegion(8581): Flush requested on 7be8b4dcb0e9f81dbd68149495fe8709 2024-12-16T17:58:58,642 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 7be8b4dcb0e9f81dbd68149495fe8709 3/3 column families, dataSize=60.38 KB heapSize=158.95 KB 2024-12-16T17:58:58,642 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 7be8b4dcb0e9f81dbd68149495fe8709, store=A 2024-12-16T17:58:58,643 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-16T17:58:58,643 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 7be8b4dcb0e9f81dbd68149495fe8709, store=B 2024-12-16T17:58:58,643 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-16T17:58:58,643 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 7be8b4dcb0e9f81dbd68149495fe8709, store=C 2024-12-16T17:58:58,643 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-16T17:58:58,653 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39733 {}] regionserver.HRegion(5069): Region is too busy due to 
exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7be8b4dcb0e9f81dbd68149495fe8709, server=3609ad07831c,39733,1734371789085 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-16T17:58:58,653 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39733 {}] ipc.CallRunner(138): callId: 8 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:47476 deadline: 1734371998651, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7be8b4dcb0e9f81dbd68149495fe8709, server=3609ad07831c,39733,1734371789085 2024-12-16T17:58:58,653 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39733 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7be8b4dcb0e9f81dbd68149495fe8709, server=3609ad07831c,39733,1734371789085 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-16T17:58:58,654 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39733 {}] ipc.CallRunner(138): callId: 7 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:47488 deadline: 1734371998651, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7be8b4dcb0e9f81dbd68149495fe8709, server=3609ad07831c,39733,1734371789085 2024-12-16T17:58:58,654 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39733 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7be8b4dcb0e9f81dbd68149495fe8709, server=3609ad07831c,39733,1734371789085 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-16T17:58:58,654 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39733 {}] ipc.CallRunner(138): callId: 8 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:47450 deadline: 1734371998652, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7be8b4dcb0e9f81dbd68149495fe8709, server=3609ad07831c,39733,1734371789085 2024-12-16T17:58:58,655 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39733 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7be8b4dcb0e9f81dbd68149495fe8709, server=3609ad07831c,39733,1734371789085 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-16T17:58:58,655 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39733 {}] ipc.CallRunner(138): callId: 8 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:47466 deadline: 1734371998653, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7be8b4dcb0e9f81dbd68149495fe8709, server=3609ad07831c,39733,1734371789085 2024-12-16T17:58:58,655 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39733 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7be8b4dcb0e9f81dbd68149495fe8709, server=3609ad07831c,39733,1734371789085 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-16T17:58:58,655 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39733 {}] ipc.CallRunner(138): callId: 9 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:47440 deadline: 1734371998653, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7be8b4dcb0e9f81dbd68149495fe8709, server=3609ad07831c,39733,1734371789085 2024-12-16T17:58:58,665 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241216ec5f021657ee4907a3a28dcf193df012_7be8b4dcb0e9f81dbd68149495fe8709 is 50, key is test_row_0/A:col10/1734371938641/Put/seqid=0 2024-12-16T17:58:58,668 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41817 is added to blk_1073742373_1549 (size=12154) 2024-12-16T17:58:58,737 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38367 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=151 2024-12-16T17:58:58,755 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39733 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7be8b4dcb0e9f81dbd68149495fe8709, server=3609ad07831c,39733,1734371789085 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-16T17:58:58,755 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39733 {}] ipc.CallRunner(138): callId: 10 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:47476 deadline: 1734371998754, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7be8b4dcb0e9f81dbd68149495fe8709, server=3609ad07831c,39733,1734371789085 2024-12-16T17:58:58,755 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39733 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7be8b4dcb0e9f81dbd68149495fe8709, server=3609ad07831c,39733,1734371789085 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-16T17:58:58,755 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39733 {}] ipc.CallRunner(138): callId: 9 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:47488 deadline: 1734371998754, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7be8b4dcb0e9f81dbd68149495fe8709, server=3609ad07831c,39733,1734371789085 2024-12-16T17:58:58,755 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39733 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7be8b4dcb0e9f81dbd68149495fe8709, server=3609ad07831c,39733,1734371789085 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-16T17:58:58,755 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39733 {}] ipc.CallRunner(138): callId: 10 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:47450 deadline: 1734371998754, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7be8b4dcb0e9f81dbd68149495fe8709, server=3609ad07831c,39733,1734371789085 2024-12-16T17:58:58,757 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39733 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7be8b4dcb0e9f81dbd68149495fe8709, server=3609ad07831c,39733,1734371789085 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-16T17:58:58,757 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39733 {}] ipc.CallRunner(138): callId: 10 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:47466 deadline: 1734371998756, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7be8b4dcb0e9f81dbd68149495fe8709, server=3609ad07831c,39733,1734371789085 2024-12-16T17:58:58,757 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39733 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7be8b4dcb0e9f81dbd68149495fe8709, server=3609ad07831c,39733,1734371789085 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-16T17:58:58,757 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39733 {}] ipc.CallRunner(138): callId: 11 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:47440 deadline: 1734371998756, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7be8b4dcb0e9f81dbd68149495fe8709, server=3609ad07831c,39733,1734371789085 2024-12-16T17:58:58,789 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 3609ad07831c,39733,1734371789085 2024-12-16T17:58:58,789 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=39733 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=152 2024-12-16T17:58:58,789 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-1 {event_type=RS_FLUSH_REGIONS, pid=152}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1734371935330.7be8b4dcb0e9f81dbd68149495fe8709. 2024-12-16T17:58:58,789 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-1 {event_type=RS_FLUSH_REGIONS, pid=152}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1734371935330.7be8b4dcb0e9f81dbd68149495fe8709. as already flushing 2024-12-16T17:58:58,789 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-1 {event_type=RS_FLUSH_REGIONS, pid=152}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1734371935330.7be8b4dcb0e9f81dbd68149495fe8709. 2024-12-16T17:58:58,789 ERROR [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-1 {event_type=RS_FLUSH_REGIONS, pid=152}] handler.RSProcedureHandler(58): pid=152 java.io.IOException: Unable to complete flush {ENCODED => 7be8b4dcb0e9f81dbd68149495fe8709, NAME => 'TestAcidGuarantees,,1734371935330.7be8b4dcb0e9f81dbd68149495fe8709.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-16T17:58:58,790 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-1 {event_type=RS_FLUSH_REGIONS, pid=152}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=152 java.io.IOException: Unable to complete flush {ENCODED => 7be8b4dcb0e9f81dbd68149495fe8709, NAME => 'TestAcidGuarantees,,1734371935330.7be8b4dcb0e9f81dbd68149495fe8709.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-16T17:58:58,790 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38367 {}] master.HMaster(4114): Remote procedure failed, pid=152 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 7be8b4dcb0e9f81dbd68149495fe8709, NAME => 'TestAcidGuarantees,,1734371935330.7be8b4dcb0e9f81dbd68149495fe8709.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 7be8b4dcb0e9f81dbd68149495fe8709, NAME => 'TestAcidGuarantees,,1734371935330.7be8b4dcb0e9f81dbd68149495fe8709.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-16T17:58:58,938 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38367 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=151 2024-12-16T17:58:58,941 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 3609ad07831c,39733,1734371789085 2024-12-16T17:58:58,941 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=39733 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=152 2024-12-16T17:58:58,941 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-2 {event_type=RS_FLUSH_REGIONS, pid=152}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1734371935330.7be8b4dcb0e9f81dbd68149495fe8709. 2024-12-16T17:58:58,942 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-2 {event_type=RS_FLUSH_REGIONS, pid=152}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1734371935330.7be8b4dcb0e9f81dbd68149495fe8709. as already flushing 2024-12-16T17:58:58,942 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-2 {event_type=RS_FLUSH_REGIONS, pid=152}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1734371935330.7be8b4dcb0e9f81dbd68149495fe8709. 2024-12-16T17:58:58,942 ERROR [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-2 {event_type=RS_FLUSH_REGIONS, pid=152}] handler.RSProcedureHandler(58): pid=152 java.io.IOException: Unable to complete flush {ENCODED => 7be8b4dcb0e9f81dbd68149495fe8709, NAME => 'TestAcidGuarantees,,1734371935330.7be8b4dcb0e9f81dbd68149495fe8709.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-16T17:58:58,942 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-2 {event_type=RS_FLUSH_REGIONS, pid=152}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=152 java.io.IOException: Unable to complete flush {ENCODED => 7be8b4dcb0e9f81dbd68149495fe8709, NAME => 'TestAcidGuarantees,,1734371935330.7be8b4dcb0e9f81dbd68149495fe8709.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-16T17:58:58,942 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38367 {}] master.HMaster(4114): Remote procedure failed, pid=152 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 7be8b4dcb0e9f81dbd68149495fe8709, NAME => 'TestAcidGuarantees,,1734371935330.7be8b4dcb0e9f81dbd68149495fe8709.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 7be8b4dcb0e9f81dbd68149495fe8709, NAME => 'TestAcidGuarantees,,1734371935330.7be8b4dcb0e9f81dbd68149495fe8709.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-16T17:58:58,956 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39733 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7be8b4dcb0e9f81dbd68149495fe8709, server=3609ad07831c,39733,1734371789085 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-16T17:58:58,957 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39733 {}] ipc.CallRunner(138): callId: 11 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:47488 deadline: 1734371998956, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7be8b4dcb0e9f81dbd68149495fe8709, server=3609ad07831c,39733,1734371789085 2024-12-16T17:58:58,958 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39733 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7be8b4dcb0e9f81dbd68149495fe8709, server=3609ad07831c,39733,1734371789085 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-16T17:58:58,958 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39733 {}] ipc.CallRunner(138): callId: 12 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:47476 deadline: 1734371998957, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7be8b4dcb0e9f81dbd68149495fe8709, server=3609ad07831c,39733,1734371789085 2024-12-16T17:58:58,959 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39733 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7be8b4dcb0e9f81dbd68149495fe8709, server=3609ad07831c,39733,1734371789085 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-16T17:58:58,959 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39733 {}] ipc.CallRunner(138): callId: 12 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:47450 deadline: 1734371998957, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7be8b4dcb0e9f81dbd68149495fe8709, server=3609ad07831c,39733,1734371789085 2024-12-16T17:58:58,959 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39733 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7be8b4dcb0e9f81dbd68149495fe8709, server=3609ad07831c,39733,1734371789085 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-16T17:58:58,959 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39733 {}] ipc.CallRunner(138): callId: 12 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:47466 deadline: 1734371998957, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7be8b4dcb0e9f81dbd68149495fe8709, server=3609ad07831c,39733,1734371789085 2024-12-16T17:58:58,959 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39733 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7be8b4dcb0e9f81dbd68149495fe8709, server=3609ad07831c,39733,1734371789085 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-16T17:58:58,959 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39733 {}] ipc.CallRunner(138): callId: 13 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:47440 deadline: 1734371998957, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7be8b4dcb0e9f81dbd68149495fe8709, server=3609ad07831c,39733,1734371789085 2024-12-16T17:58:59,069 DEBUG [MemStoreFlusher.0 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-16T17:58:59,072 INFO [MemStoreFlusher.0 {}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241216ec5f021657ee4907a3a28dcf193df012_7be8b4dcb0e9f81dbd68149495fe8709 to hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241216ec5f021657ee4907a3a28dcf193df012_7be8b4dcb0e9f81dbd68149495fe8709 2024-12-16T17:58:59,073 DEBUG [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/7be8b4dcb0e9f81dbd68149495fe8709/.tmp/A/1ed1223a6790412e90e76d0b6198fa3d, store: [table=TestAcidGuarantees family=A region=7be8b4dcb0e9f81dbd68149495fe8709] 2024-12-16T17:58:59,073 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/7be8b4dcb0e9f81dbd68149495fe8709/.tmp/A/1ed1223a6790412e90e76d0b6198fa3d is 175, key is test_row_0/A:col10/1734371938641/Put/seqid=0 2024-12-16T17:58:59,075 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41817 is added to blk_1073742374_1550 (size=30955) 2024-12-16T17:58:59,093 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 
3609ad07831c,39733,1734371789085 2024-12-16T17:58:59,093 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=39733 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=152 2024-12-16T17:58:59,094 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-0 {event_type=RS_FLUSH_REGIONS, pid=152}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1734371935330.7be8b4dcb0e9f81dbd68149495fe8709. 2024-12-16T17:58:59,094 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-0 {event_type=RS_FLUSH_REGIONS, pid=152}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1734371935330.7be8b4dcb0e9f81dbd68149495fe8709. as already flushing 2024-12-16T17:58:59,094 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-0 {event_type=RS_FLUSH_REGIONS, pid=152}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1734371935330.7be8b4dcb0e9f81dbd68149495fe8709. 2024-12-16T17:58:59,094 ERROR [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-0 {event_type=RS_FLUSH_REGIONS, pid=152}] handler.RSProcedureHandler(58): pid=152 java.io.IOException: Unable to complete flush {ENCODED => 7be8b4dcb0e9f81dbd68149495fe8709, NAME => 'TestAcidGuarantees,,1734371935330.7be8b4dcb0e9f81dbd68149495fe8709.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-16T17:58:59,094 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-0 {event_type=RS_FLUSH_REGIONS, pid=152}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=152 java.io.IOException: Unable to complete flush {ENCODED => 7be8b4dcb0e9f81dbd68149495fe8709, NAME => 'TestAcidGuarantees,,1734371935330.7be8b4dcb0e9f81dbd68149495fe8709.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-16T17:58:59,094 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38367 {}] master.HMaster(4114): Remote procedure failed, pid=152 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 7be8b4dcb0e9f81dbd68149495fe8709, NAME => 'TestAcidGuarantees,,1734371935330.7be8b4dcb0e9f81dbd68149495fe8709.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 7be8b4dcb0e9f81dbd68149495fe8709, NAME => 'TestAcidGuarantees,,1734371935330.7be8b4dcb0e9f81dbd68149495fe8709.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-16T17:58:59,239 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38367 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=151 2024-12-16T17:58:59,245 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 3609ad07831c,39733,1734371789085 2024-12-16T17:58:59,246 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=39733 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=152 2024-12-16T17:58:59,246 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-1 {event_type=RS_FLUSH_REGIONS, pid=152}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1734371935330.7be8b4dcb0e9f81dbd68149495fe8709. 
2024-12-16T17:58:59,246 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-1 {event_type=RS_FLUSH_REGIONS, pid=152}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1734371935330.7be8b4dcb0e9f81dbd68149495fe8709. as already flushing 2024-12-16T17:58:59,246 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-1 {event_type=RS_FLUSH_REGIONS, pid=152}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1734371935330.7be8b4dcb0e9f81dbd68149495fe8709. 2024-12-16T17:58:59,246 ERROR [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-1 {event_type=RS_FLUSH_REGIONS, pid=152}] handler.RSProcedureHandler(58): pid=152 java.io.IOException: Unable to complete flush {ENCODED => 7be8b4dcb0e9f81dbd68149495fe8709, NAME => 'TestAcidGuarantees,,1734371935330.7be8b4dcb0e9f81dbd68149495fe8709.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-16T17:58:59,246 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-1 {event_type=RS_FLUSH_REGIONS, pid=152}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=152 java.io.IOException: Unable to complete flush {ENCODED => 7be8b4dcb0e9f81dbd68149495fe8709, NAME => 'TestAcidGuarantees,,1734371935330.7be8b4dcb0e9f81dbd68149495fe8709.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-16T17:58:59,246 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38367 {}] master.HMaster(4114): Remote procedure failed, pid=152 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 7be8b4dcb0e9f81dbd68149495fe8709, NAME => 'TestAcidGuarantees,,1734371935330.7be8b4dcb0e9f81dbd68149495fe8709.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 7be8b4dcb0e9f81dbd68149495fe8709, NAME => 'TestAcidGuarantees,,1734371935330.7be8b4dcb0e9f81dbd68149495fe8709.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-16T17:58:59,261 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39733 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7be8b4dcb0e9f81dbd68149495fe8709, server=3609ad07831c,39733,1734371789085 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-16T17:58:59,261 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39733 {}] ipc.CallRunner(138): callId: 14 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:47450 deadline: 1734371999259, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7be8b4dcb0e9f81dbd68149495fe8709, server=3609ad07831c,39733,1734371789085 2024-12-16T17:58:59,261 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39733 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7be8b4dcb0e9f81dbd68149495fe8709, server=3609ad07831c,39733,1734371789085 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-16T17:58:59,261 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39733 {}] ipc.CallRunner(138): callId: 13 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:47488 deadline: 1734371999259, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7be8b4dcb0e9f81dbd68149495fe8709, server=3609ad07831c,39733,1734371789085 2024-12-16T17:58:59,261 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39733 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7be8b4dcb0e9f81dbd68149495fe8709, server=3609ad07831c,39733,1734371789085 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-16T17:58:59,261 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39733 {}] ipc.CallRunner(138): callId: 14 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:47476 deadline: 1734371999260, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7be8b4dcb0e9f81dbd68149495fe8709, server=3609ad07831c,39733,1734371789085 2024-12-16T17:58:59,262 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39733 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7be8b4dcb0e9f81dbd68149495fe8709, server=3609ad07831c,39733,1734371789085 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-16T17:58:59,262 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39733 {}] ipc.CallRunner(138): callId: 15 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:47440 deadline: 1734371999261, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7be8b4dcb0e9f81dbd68149495fe8709, server=3609ad07831c,39733,1734371789085 2024-12-16T17:58:59,263 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39733 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7be8b4dcb0e9f81dbd68149495fe8709, server=3609ad07831c,39733,1734371789085 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-16T17:58:59,263 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39733 {}] ipc.CallRunner(138): callId: 14 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:47466 deadline: 1734371999262, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7be8b4dcb0e9f81dbd68149495fe8709, server=3609ad07831c,39733,1734371789085 2024-12-16T17:58:59,403 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 3609ad07831c,39733,1734371789085 2024-12-16T17:58:59,403 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=39733 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=152 2024-12-16T17:58:59,403 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-2 {event_type=RS_FLUSH_REGIONS, pid=152}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1734371935330.7be8b4dcb0e9f81dbd68149495fe8709. 2024-12-16T17:58:59,403 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-2 {event_type=RS_FLUSH_REGIONS, pid=152}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1734371935330.7be8b4dcb0e9f81dbd68149495fe8709. as already flushing 2024-12-16T17:58:59,403 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-2 {event_type=RS_FLUSH_REGIONS, pid=152}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1734371935330.7be8b4dcb0e9f81dbd68149495fe8709. 2024-12-16T17:58:59,403 ERROR [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-2 {event_type=RS_FLUSH_REGIONS, pid=152}] handler.RSProcedureHandler(58): pid=152 java.io.IOException: Unable to complete flush {ENCODED => 7be8b4dcb0e9f81dbd68149495fe8709, NAME => 'TestAcidGuarantees,,1734371935330.7be8b4dcb0e9f81dbd68149495fe8709.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-16T17:58:59,403 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-2 {event_type=RS_FLUSH_REGIONS, pid=152}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=152 java.io.IOException: Unable to complete flush {ENCODED => 7be8b4dcb0e9f81dbd68149495fe8709, NAME => 'TestAcidGuarantees,,1734371935330.7be8b4dcb0e9f81dbd68149495fe8709.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-16T17:58:59,404 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38367 {}] master.HMaster(4114): Remote procedure failed, pid=152 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 7be8b4dcb0e9f81dbd68149495fe8709, NAME => 'TestAcidGuarantees,,1734371935330.7be8b4dcb0e9f81dbd68149495fe8709.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 7be8b4dcb0e9f81dbd68149495fe8709, NAME => 'TestAcidGuarantees,,1734371935330.7be8b4dcb0e9f81dbd68149495fe8709.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-16T17:58:59,476 INFO [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=18, memsize=24.6 K, hasBloomFilter=true, into tmp file hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/7be8b4dcb0e9f81dbd68149495fe8709/.tmp/A/1ed1223a6790412e90e76d0b6198fa3d 2024-12-16T17:58:59,493 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/7be8b4dcb0e9f81dbd68149495fe8709/.tmp/B/76372f8dc86349f585e7c81e2b3353fd is 50, key is test_row_0/B:col10/1734371938641/Put/seqid=0 2024-12-16T17:58:59,495 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41817 is added to blk_1073742375_1551 (size=12001) 2024-12-16T17:58:59,496 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=24.60 KB at sequenceid=18 (bloomFilter=true), to=hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/7be8b4dcb0e9f81dbd68149495fe8709/.tmp/B/76372f8dc86349f585e7c81e2b3353fd 2024-12-16T17:58:59,514 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/7be8b4dcb0e9f81dbd68149495fe8709/.tmp/C/235b01cba3ec488ea4d5e39ba1b0dd21 is 50, key is test_row_0/C:col10/1734371938641/Put/seqid=0 2024-12-16T17:58:59,517 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41817 is added to blk_1073742376_1552 (size=12001) 2024-12-16T17:58:59,560 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 3609ad07831c,39733,1734371789085 
2024-12-16T17:58:59,560 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=39733 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=152 2024-12-16T17:58:59,560 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-0 {event_type=RS_FLUSH_REGIONS, pid=152}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1734371935330.7be8b4dcb0e9f81dbd68149495fe8709. 2024-12-16T17:58:59,561 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-0 {event_type=RS_FLUSH_REGIONS, pid=152}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1734371935330.7be8b4dcb0e9f81dbd68149495fe8709. as already flushing 2024-12-16T17:58:59,561 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-0 {event_type=RS_FLUSH_REGIONS, pid=152}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1734371935330.7be8b4dcb0e9f81dbd68149495fe8709. 2024-12-16T17:58:59,561 ERROR [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-0 {event_type=RS_FLUSH_REGIONS, pid=152}] handler.RSProcedureHandler(58): pid=152 java.io.IOException: Unable to complete flush {ENCODED => 7be8b4dcb0e9f81dbd68149495fe8709, NAME => 'TestAcidGuarantees,,1734371935330.7be8b4dcb0e9f81dbd68149495fe8709.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-16T17:58:59,561 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-0 {event_type=RS_FLUSH_REGIONS, pid=152}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=152 java.io.IOException: Unable to complete flush {ENCODED => 7be8b4dcb0e9f81dbd68149495fe8709, NAME => 'TestAcidGuarantees,,1734371935330.7be8b4dcb0e9f81dbd68149495fe8709.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-16T17:58:59,561 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38367 {}] master.HMaster(4114): Remote procedure failed, pid=152 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 7be8b4dcb0e9f81dbd68149495fe8709, NAME => 'TestAcidGuarantees,,1734371935330.7be8b4dcb0e9f81dbd68149495fe8709.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 7be8b4dcb0e9f81dbd68149495fe8709, NAME => 'TestAcidGuarantees,,1734371935330.7be8b4dcb0e9f81dbd68149495fe8709.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-16T17:58:59,712 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 3609ad07831c,39733,1734371789085 2024-12-16T17:58:59,713 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=39733 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=152 2024-12-16T17:58:59,713 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-1 {event_type=RS_FLUSH_REGIONS, pid=152}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1734371935330.7be8b4dcb0e9f81dbd68149495fe8709. 2024-12-16T17:58:59,713 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-1 {event_type=RS_FLUSH_REGIONS, pid=152}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1734371935330.7be8b4dcb0e9f81dbd68149495fe8709. 
as already flushing 2024-12-16T17:58:59,713 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-1 {event_type=RS_FLUSH_REGIONS, pid=152}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1734371935330.7be8b4dcb0e9f81dbd68149495fe8709. 2024-12-16T17:58:59,713 ERROR [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-1 {event_type=RS_FLUSH_REGIONS, pid=152}] handler.RSProcedureHandler(58): pid=152 java.io.IOException: Unable to complete flush {ENCODED => 7be8b4dcb0e9f81dbd68149495fe8709, NAME => 'TestAcidGuarantees,,1734371935330.7be8b4dcb0e9f81dbd68149495fe8709.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-16T17:58:59,713 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-1 {event_type=RS_FLUSH_REGIONS, pid=152}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=152 java.io.IOException: Unable to complete flush {ENCODED => 7be8b4dcb0e9f81dbd68149495fe8709, NAME => 'TestAcidGuarantees,,1734371935330.7be8b4dcb0e9f81dbd68149495fe8709.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-16T17:58:59,713 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38367 {}] master.HMaster(4114): Remote procedure failed, pid=152 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 7be8b4dcb0e9f81dbd68149495fe8709, NAME => 'TestAcidGuarantees,,1734371935330.7be8b4dcb0e9f81dbd68149495fe8709.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] 
at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 7be8b4dcb0e9f81dbd68149495fe8709, NAME => 'TestAcidGuarantees,,1734371935330.7be8b4dcb0e9f81dbd68149495fe8709.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-16T17:58:59,739 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38367 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=151 2024-12-16T17:58:59,764 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39733 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7be8b4dcb0e9f81dbd68149495fe8709, server=3609ad07831c,39733,1734371789085 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-16T17:58:59,764 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39733 {}] ipc.CallRunner(138): callId: 15 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:47488 deadline: 1734371999764, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7be8b4dcb0e9f81dbd68149495fe8709, server=3609ad07831c,39733,1734371789085 2024-12-16T17:58:59,765 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39733 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7be8b4dcb0e9f81dbd68149495fe8709, server=3609ad07831c,39733,1734371789085 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-16T17:58:59,765 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39733 {}] ipc.CallRunner(138): callId: 16 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:47476 deadline: 1734371999764, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7be8b4dcb0e9f81dbd68149495fe8709, server=3609ad07831c,39733,1734371789085 2024-12-16T17:58:59,766 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39733 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7be8b4dcb0e9f81dbd68149495fe8709, server=3609ad07831c,39733,1734371789085 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-16T17:58:59,766 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39733 {}] ipc.CallRunner(138): callId: 17 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:47440 deadline: 1734371999764, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7be8b4dcb0e9f81dbd68149495fe8709, server=3609ad07831c,39733,1734371789085 2024-12-16T17:58:59,766 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39733 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7be8b4dcb0e9f81dbd68149495fe8709, server=3609ad07831c,39733,1734371789085 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-16T17:58:59,766 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39733 {}] ipc.CallRunner(138): callId: 16 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:47450 deadline: 1734371999764, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7be8b4dcb0e9f81dbd68149495fe8709, server=3609ad07831c,39733,1734371789085 2024-12-16T17:58:59,766 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39733 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7be8b4dcb0e9f81dbd68149495fe8709, server=3609ad07831c,39733,1734371789085 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-16T17:58:59,766 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39733 {}] ipc.CallRunner(138): callId: 16 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:47466 deadline: 1734371999764, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7be8b4dcb0e9f81dbd68149495fe8709, server=3609ad07831c,39733,1734371789085 2024-12-16T17:58:59,864 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 3609ad07831c,39733,1734371789085 2024-12-16T17:58:59,865 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=39733 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=152 2024-12-16T17:58:59,865 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-2 {event_type=RS_FLUSH_REGIONS, pid=152}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1734371935330.7be8b4dcb0e9f81dbd68149495fe8709. 2024-12-16T17:58:59,865 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-2 {event_type=RS_FLUSH_REGIONS, pid=152}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1734371935330.7be8b4dcb0e9f81dbd68149495fe8709. as already flushing 2024-12-16T17:58:59,865 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-2 {event_type=RS_FLUSH_REGIONS, pid=152}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1734371935330.7be8b4dcb0e9f81dbd68149495fe8709. 2024-12-16T17:58:59,865 ERROR [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-2 {event_type=RS_FLUSH_REGIONS, pid=152}] handler.RSProcedureHandler(58): pid=152 java.io.IOException: Unable to complete flush {ENCODED => 7be8b4dcb0e9f81dbd68149495fe8709, NAME => 'TestAcidGuarantees,,1734371935330.7be8b4dcb0e9f81dbd68149495fe8709.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-16T17:58:59,865 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-2 {event_type=RS_FLUSH_REGIONS, pid=152}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=152 java.io.IOException: Unable to complete flush {ENCODED => 7be8b4dcb0e9f81dbd68149495fe8709, NAME => 'TestAcidGuarantees,,1734371935330.7be8b4dcb0e9f81dbd68149495fe8709.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-16T17:58:59,865 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38367 {}] master.HMaster(4114): Remote procedure failed, pid=152 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 7be8b4dcb0e9f81dbd68149495fe8709, NAME => 'TestAcidGuarantees,,1734371935330.7be8b4dcb0e9f81dbd68149495fe8709.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 7be8b4dcb0e9f81dbd68149495fe8709, NAME => 'TestAcidGuarantees,,1734371935330.7be8b4dcb0e9f81dbd68149495fe8709.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-16T17:58:59,918 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=24.60 KB at sequenceid=18 (bloomFilter=true), to=hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/7be8b4dcb0e9f81dbd68149495fe8709/.tmp/C/235b01cba3ec488ea4d5e39ba1b0dd21 2024-12-16T17:58:59,920 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/7be8b4dcb0e9f81dbd68149495fe8709/.tmp/A/1ed1223a6790412e90e76d0b6198fa3d as hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/7be8b4dcb0e9f81dbd68149495fe8709/A/1ed1223a6790412e90e76d0b6198fa3d 2024-12-16T17:58:59,923 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/7be8b4dcb0e9f81dbd68149495fe8709/A/1ed1223a6790412e90e76d0b6198fa3d, entries=150, sequenceid=18, filesize=30.2 K 2024-12-16T17:58:59,924 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/7be8b4dcb0e9f81dbd68149495fe8709/.tmp/B/76372f8dc86349f585e7c81e2b3353fd as hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/7be8b4dcb0e9f81dbd68149495fe8709/B/76372f8dc86349f585e7c81e2b3353fd 2024-12-16T17:58:59,930 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/7be8b4dcb0e9f81dbd68149495fe8709/B/76372f8dc86349f585e7c81e2b3353fd, entries=150, sequenceid=18, 
filesize=11.7 K 2024-12-16T17:58:59,931 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/7be8b4dcb0e9f81dbd68149495fe8709/.tmp/C/235b01cba3ec488ea4d5e39ba1b0dd21 as hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/7be8b4dcb0e9f81dbd68149495fe8709/C/235b01cba3ec488ea4d5e39ba1b0dd21 2024-12-16T17:58:59,934 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/7be8b4dcb0e9f81dbd68149495fe8709/C/235b01cba3ec488ea4d5e39ba1b0dd21, entries=150, sequenceid=18, filesize=11.7 K 2024-12-16T17:58:59,935 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~73.80 KB/75570, heapSize ~194.06 KB/198720, currentSize=127.47 KB/130530 for 7be8b4dcb0e9f81dbd68149495fe8709 in 1293ms, sequenceid=18, compaction requested=false 2024-12-16T17:58:59,935 DEBUG [MemStoreFlusher.0 {}] regionserver.MetricsTableSourceImpl(133): Creating new MetricsTableSourceImpl for table 'TestAcidGuarantees' 2024-12-16T17:58:59,935 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 7be8b4dcb0e9f81dbd68149495fe8709: 2024-12-16T17:59:00,016 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 3609ad07831c,39733,1734371789085 2024-12-16T17:59:00,017 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=39733 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=152 2024-12-16T17:59:00,017 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-0 {event_type=RS_FLUSH_REGIONS, pid=152}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1734371935330.7be8b4dcb0e9f81dbd68149495fe8709. 
2024-12-16T17:59:00,017 INFO [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-0 {event_type=RS_FLUSH_REGIONS, pid=152}] regionserver.HRegion(2837): Flushing 7be8b4dcb0e9f81dbd68149495fe8709 3/3 column families, dataSize=127.47 KB heapSize=334.73 KB 2024-12-16T17:59:00,017 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-0 {event_type=RS_FLUSH_REGIONS, pid=152}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 7be8b4dcb0e9f81dbd68149495fe8709, store=A 2024-12-16T17:59:00,017 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-0 {event_type=RS_FLUSH_REGIONS, pid=152}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-16T17:59:00,017 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-0 {event_type=RS_FLUSH_REGIONS, pid=152}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 7be8b4dcb0e9f81dbd68149495fe8709, store=B 2024-12-16T17:59:00,017 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-0 {event_type=RS_FLUSH_REGIONS, pid=152}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-16T17:59:00,018 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-0 {event_type=RS_FLUSH_REGIONS, pid=152}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 7be8b4dcb0e9f81dbd68149495fe8709, store=C 2024-12-16T17:59:00,018 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-0 {event_type=RS_FLUSH_REGIONS, pid=152}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-16T17:59:00,022 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-0 {event_type=RS_FLUSH_REGIONS, pid=152}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241216b6e370d3dd2f43c0b64b503eac49b16e_7be8b4dcb0e9f81dbd68149495fe8709 is 50, key is test_row_0/A:col10/1734371938652/Put/seqid=0 2024-12-16T17:59:00,025 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41817 is added to blk_1073742377_1553 (size=12154) 2024-12-16T17:59:00,126 WARN [HBase-Metrics2-1 {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-hbase.properties,hadoop-metrics2.properties 2024-12-16T17:59:00,425 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-0 {event_type=RS_FLUSH_REGIONS, pid=152}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-16T17:59:00,428 INFO [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-0 {event_type=RS_FLUSH_REGIONS, pid=152}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241216b6e370d3dd2f43c0b64b503eac49b16e_7be8b4dcb0e9f81dbd68149495fe8709 to hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241216b6e370d3dd2f43c0b64b503eac49b16e_7be8b4dcb0e9f81dbd68149495fe8709 2024-12-16T17:59:00,428 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-0 {event_type=RS_FLUSH_REGIONS, pid=152}] mob.DefaultMobStoreFlusher(263): Flush store file: 
hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/7be8b4dcb0e9f81dbd68149495fe8709/.tmp/A/40d17e76e6bb4faca63a3d0ecc29a7a7, store: [table=TestAcidGuarantees family=A region=7be8b4dcb0e9f81dbd68149495fe8709] 2024-12-16T17:59:00,429 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-0 {event_type=RS_FLUSH_REGIONS, pid=152}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/7be8b4dcb0e9f81dbd68149495fe8709/.tmp/A/40d17e76e6bb4faca63a3d0ecc29a7a7 is 175, key is test_row_0/A:col10/1734371938652/Put/seqid=0 2024-12-16T17:59:00,431 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41817 is added to blk_1073742378_1554 (size=30955) 2024-12-16T17:59:00,740 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38367 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=151 2024-12-16T17:59:00,769 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39733 {}] regionserver.HRegion(8581): Flush requested on 7be8b4dcb0e9f81dbd68149495fe8709 2024-12-16T17:59:00,769 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1734371935330.7be8b4dcb0e9f81dbd68149495fe8709. as already flushing 2024-12-16T17:59:00,775 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39733 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7be8b4dcb0e9f81dbd68149495fe8709, server=3609ad07831c,39733,1734371789085 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-16T17:59:00,776 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39733 {}] ipc.CallRunner(138): callId: 19 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:47450 deadline: 1734372000774, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7be8b4dcb0e9f81dbd68149495fe8709, server=3609ad07831c,39733,1734371789085 2024-12-16T17:59:00,776 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39733 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7be8b4dcb0e9f81dbd68149495fe8709, server=3609ad07831c,39733,1734371789085 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-16T17:59:00,776 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39733 {}] ipc.CallRunner(138): callId: 21 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:47476 deadline: 1734372000774, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7be8b4dcb0e9f81dbd68149495fe8709, server=3609ad07831c,39733,1734371789085 2024-12-16T17:59:00,776 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39733 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7be8b4dcb0e9f81dbd68149495fe8709, server=3609ad07831c,39733,1734371789085 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-16T17:59:00,776 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39733 {}] ipc.CallRunner(138): callId: 19 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:47488 deadline: 1734372000774, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7be8b4dcb0e9f81dbd68149495fe8709, server=3609ad07831c,39733,1734371789085 2024-12-16T17:59:00,777 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39733 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7be8b4dcb0e9f81dbd68149495fe8709, server=3609ad07831c,39733,1734371789085 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-16T17:59:00,777 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39733 {}] ipc.CallRunner(138): callId: 21 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:47466 deadline: 1734372000776, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7be8b4dcb0e9f81dbd68149495fe8709, server=3609ad07831c,39733,1734371789085 2024-12-16T17:59:00,777 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39733 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7be8b4dcb0e9f81dbd68149495fe8709, server=3609ad07831c,39733,1734371789085 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-16T17:59:00,777 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39733 {}] ipc.CallRunner(138): callId: 22 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:47440 deadline: 1734372000776, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7be8b4dcb0e9f81dbd68149495fe8709, server=3609ad07831c,39733,1734371789085 2024-12-16T17:59:00,832 INFO [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-0 {event_type=RS_FLUSH_REGIONS, pid=152}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=40, memsize=42.5 K, hasBloomFilter=true, into tmp file hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/7be8b4dcb0e9f81dbd68149495fe8709/.tmp/A/40d17e76e6bb4faca63a3d0ecc29a7a7 2024-12-16T17:59:00,837 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-0 {event_type=RS_FLUSH_REGIONS, pid=152}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/7be8b4dcb0e9f81dbd68149495fe8709/.tmp/B/d1f75b2789e040c1bc1e999642231d64 is 50, key is test_row_0/B:col10/1734371938652/Put/seqid=0 2024-12-16T17:59:00,839 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41817 is added to blk_1073742379_1555 (size=12001) 2024-12-16T17:59:00,877 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39733 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7be8b4dcb0e9f81dbd68149495fe8709, server=3609ad07831c,39733,1734371789085 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-16T17:59:00,877 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39733 {}] ipc.CallRunner(138): callId: 21 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:47450 deadline: 1734372000876, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7be8b4dcb0e9f81dbd68149495fe8709, server=3609ad07831c,39733,1734371789085 2024-12-16T17:59:00,877 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39733 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7be8b4dcb0e9f81dbd68149495fe8709, server=3609ad07831c,39733,1734371789085 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-16T17:59:00,877 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39733 {}] ipc.CallRunner(138): callId: 23 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:47476 deadline: 1734372000876, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7be8b4dcb0e9f81dbd68149495fe8709, server=3609ad07831c,39733,1734371789085 2024-12-16T17:59:00,878 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39733 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7be8b4dcb0e9f81dbd68149495fe8709, server=3609ad07831c,39733,1734371789085 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-16T17:59:00,878 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39733 {}] ipc.CallRunner(138): callId: 21 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:47488 deadline: 1734372000876, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7be8b4dcb0e9f81dbd68149495fe8709, server=3609ad07831c,39733,1734371789085 2024-12-16T17:59:00,878 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39733 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7be8b4dcb0e9f81dbd68149495fe8709, server=3609ad07831c,39733,1734371789085 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-16T17:59:00,879 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39733 {}] ipc.CallRunner(138): callId: 23 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:47466 deadline: 1734372000877, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7be8b4dcb0e9f81dbd68149495fe8709, server=3609ad07831c,39733,1734371789085 2024-12-16T17:59:00,879 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39733 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7be8b4dcb0e9f81dbd68149495fe8709, server=3609ad07831c,39733,1734371789085 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-16T17:59:00,879 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39733 {}] ipc.CallRunner(138): callId: 24 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:47440 deadline: 1734372000878, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7be8b4dcb0e9f81dbd68149495fe8709, server=3609ad07831c,39733,1734371789085 2024-12-16T17:59:01,079 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39733 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7be8b4dcb0e9f81dbd68149495fe8709, server=3609ad07831c,39733,1734371789085 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-16T17:59:01,079 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39733 {}] ipc.CallRunner(138): callId: 25 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:47476 deadline: 1734372001078, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7be8b4dcb0e9f81dbd68149495fe8709, server=3609ad07831c,39733,1734371789085 2024-12-16T17:59:01,079 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39733 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7be8b4dcb0e9f81dbd68149495fe8709, server=3609ad07831c,39733,1734371789085 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-16T17:59:01,079 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39733 {}] ipc.CallRunner(138): callId: 23 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:47488 deadline: 1734372001078, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7be8b4dcb0e9f81dbd68149495fe8709, server=3609ad07831c,39733,1734371789085 2024-12-16T17:59:01,080 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39733 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7be8b4dcb0e9f81dbd68149495fe8709, server=3609ad07831c,39733,1734371789085 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-16T17:59:01,080 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39733 {}] ipc.CallRunner(138): callId: 23 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:47450 deadline: 1734372001079, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7be8b4dcb0e9f81dbd68149495fe8709, server=3609ad07831c,39733,1734371789085 2024-12-16T17:59:01,080 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39733 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7be8b4dcb0e9f81dbd68149495fe8709, server=3609ad07831c,39733,1734371789085 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-16T17:59:01,080 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39733 {}] ipc.CallRunner(138): callId: 25 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:47466 deadline: 1734372001079, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7be8b4dcb0e9f81dbd68149495fe8709, server=3609ad07831c,39733,1734371789085 2024-12-16T17:59:01,081 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39733 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7be8b4dcb0e9f81dbd68149495fe8709, server=3609ad07831c,39733,1734371789085 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-16T17:59:01,081 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39733 {}] ipc.CallRunner(138): callId: 26 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:47440 deadline: 1734372001080, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7be8b4dcb0e9f81dbd68149495fe8709, server=3609ad07831c,39733,1734371789085 2024-12-16T17:59:01,240 INFO [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-0 {event_type=RS_FLUSH_REGIONS, pid=152}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=42.49 KB at sequenceid=40 (bloomFilter=true), to=hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/7be8b4dcb0e9f81dbd68149495fe8709/.tmp/B/d1f75b2789e040c1bc1e999642231d64 2024-12-16T17:59:01,245 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-0 {event_type=RS_FLUSH_REGIONS, pid=152}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/7be8b4dcb0e9f81dbd68149495fe8709/.tmp/C/637041afafeb4e50a9155f367d4a7358 is 50, key is test_row_0/C:col10/1734371938652/Put/seqid=0 2024-12-16T17:59:01,262 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41817 is added to blk_1073742380_1556 (size=12001) 2024-12-16T17:59:01,382 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39733 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7be8b4dcb0e9f81dbd68149495fe8709, server=3609ad07831c,39733,1734371789085 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-16T17:59:01,383 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39733 {}] ipc.CallRunner(138): callId: 25 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:47450 deadline: 1734372001381, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7be8b4dcb0e9f81dbd68149495fe8709, server=3609ad07831c,39733,1734371789085 2024-12-16T17:59:01,383 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39733 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7be8b4dcb0e9f81dbd68149495fe8709, server=3609ad07831c,39733,1734371789085 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-16T17:59:01,383 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39733 {}] ipc.CallRunner(138): callId: 27 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:47476 deadline: 1734372001382, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7be8b4dcb0e9f81dbd68149495fe8709, server=3609ad07831c,39733,1734371789085 2024-12-16T17:59:01,383 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39733 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7be8b4dcb0e9f81dbd68149495fe8709, server=3609ad07831c,39733,1734371789085 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-16T17:59:01,383 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39733 {}] ipc.CallRunner(138): callId: 25 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:47488 deadline: 1734372001382, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7be8b4dcb0e9f81dbd68149495fe8709, server=3609ad07831c,39733,1734371789085 2024-12-16T17:59:01,383 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39733 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7be8b4dcb0e9f81dbd68149495fe8709, server=3609ad07831c,39733,1734371789085 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-16T17:59:01,383 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39733 {}] ipc.CallRunner(138): callId: 27 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:47466 deadline: 1734372001383, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7be8b4dcb0e9f81dbd68149495fe8709, server=3609ad07831c,39733,1734371789085 2024-12-16T17:59:01,384 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39733 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7be8b4dcb0e9f81dbd68149495fe8709, server=3609ad07831c,39733,1734371789085 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-16T17:59:01,384 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39733 {}] ipc.CallRunner(138): callId: 28 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:47440 deadline: 1734372001383, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7be8b4dcb0e9f81dbd68149495fe8709, server=3609ad07831c,39733,1734371789085 2024-12-16T17:59:01,663 INFO [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-0 {event_type=RS_FLUSH_REGIONS, pid=152}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=42.49 KB at sequenceid=40 (bloomFilter=true), to=hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/7be8b4dcb0e9f81dbd68149495fe8709/.tmp/C/637041afafeb4e50a9155f367d4a7358 2024-12-16T17:59:01,666 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-0 {event_type=RS_FLUSH_REGIONS, pid=152}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/7be8b4dcb0e9f81dbd68149495fe8709/.tmp/A/40d17e76e6bb4faca63a3d0ecc29a7a7 as hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/7be8b4dcb0e9f81dbd68149495fe8709/A/40d17e76e6bb4faca63a3d0ecc29a7a7 2024-12-16T17:59:01,668 INFO [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-0 {event_type=RS_FLUSH_REGIONS, pid=152}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/7be8b4dcb0e9f81dbd68149495fe8709/A/40d17e76e6bb4faca63a3d0ecc29a7a7, entries=150, sequenceid=40, filesize=30.2 K 2024-12-16T17:59:01,669 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-0 {event_type=RS_FLUSH_REGIONS, pid=152}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/7be8b4dcb0e9f81dbd68149495fe8709/.tmp/B/d1f75b2789e040c1bc1e999642231d64 as 
hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/7be8b4dcb0e9f81dbd68149495fe8709/B/d1f75b2789e040c1bc1e999642231d64 2024-12-16T17:59:01,671 INFO [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-0 {event_type=RS_FLUSH_REGIONS, pid=152}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/7be8b4dcb0e9f81dbd68149495fe8709/B/d1f75b2789e040c1bc1e999642231d64, entries=150, sequenceid=40, filesize=11.7 K 2024-12-16T17:59:01,672 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-0 {event_type=RS_FLUSH_REGIONS, pid=152}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/7be8b4dcb0e9f81dbd68149495fe8709/.tmp/C/637041afafeb4e50a9155f367d4a7358 as hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/7be8b4dcb0e9f81dbd68149495fe8709/C/637041afafeb4e50a9155f367d4a7358 2024-12-16T17:59:01,674 INFO [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-0 {event_type=RS_FLUSH_REGIONS, pid=152}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/7be8b4dcb0e9f81dbd68149495fe8709/C/637041afafeb4e50a9155f367d4a7358, entries=150, sequenceid=40, filesize=11.7 K 2024-12-16T17:59:01,675 INFO [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-0 {event_type=RS_FLUSH_REGIONS, pid=152}] regionserver.HRegion(3040): Finished flush of dataSize ~127.47 KB/130530, heapSize ~334.69 KB/342720, currentSize=80.51 KB/82440 for 7be8b4dcb0e9f81dbd68149495fe8709 in 1658ms, sequenceid=40, compaction requested=false 2024-12-16T17:59:01,675 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-0 {event_type=RS_FLUSH_REGIONS, pid=152}] regionserver.HRegion(2538): Flush status journal for 7be8b4dcb0e9f81dbd68149495fe8709: 2024-12-16T17:59:01,675 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-0 {event_type=RS_FLUSH_REGIONS, pid=152}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1734371935330.7be8b4dcb0e9f81dbd68149495fe8709. 
2024-12-16T17:59:01,675 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-0 {event_type=RS_FLUSH_REGIONS, pid=152}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=152 2024-12-16T17:59:01,675 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38367 {}] master.HMaster(4106): Remote procedure done, pid=152 2024-12-16T17:59:01,679 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=152, resume processing ppid=151 2024-12-16T17:59:01,679 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=152, ppid=151, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 3.0400 sec 2024-12-16T17:59:01,680 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=151, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=151, table=TestAcidGuarantees in 3.0490 sec 2024-12-16T17:59:01,887 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39733 {}] regionserver.HRegion(8581): Flush requested on 7be8b4dcb0e9f81dbd68149495fe8709 2024-12-16T17:59:01,887 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 7be8b4dcb0e9f81dbd68149495fe8709 3/3 column families, dataSize=87.22 KB heapSize=229.27 KB 2024-12-16T17:59:01,888 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 7be8b4dcb0e9f81dbd68149495fe8709, store=A 2024-12-16T17:59:01,888 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-16T17:59:01,888 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 7be8b4dcb0e9f81dbd68149495fe8709, store=B 2024-12-16T17:59:01,888 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-16T17:59:01,888 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 7be8b4dcb0e9f81dbd68149495fe8709, store=C 2024-12-16T17:59:01,888 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-16T17:59:01,893 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241216e53ec42e240e4332a559ef79dda98ad7_7be8b4dcb0e9f81dbd68149495fe8709 is 50, key is test_row_0/A:col10/1734371941886/Put/seqid=0 2024-12-16T17:59:01,898 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41817 is added to blk_1073742381_1557 (size=14594) 2024-12-16T17:59:01,899 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39733 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7be8b4dcb0e9f81dbd68149495fe8709, server=3609ad07831c,39733,1734371789085 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-16T17:59:01,899 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39733 {}] ipc.CallRunner(138): callId: 33 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:47466 deadline: 1734372001897, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7be8b4dcb0e9f81dbd68149495fe8709, server=3609ad07831c,39733,1734371789085 2024-12-16T17:59:01,901 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39733 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7be8b4dcb0e9f81dbd68149495fe8709, server=3609ad07831c,39733,1734371789085 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-16T17:59:01,901 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39733 {}] ipc.CallRunner(138): callId: 31 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:47450 deadline: 1734372001898, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7be8b4dcb0e9f81dbd68149495fe8709, server=3609ad07831c,39733,1734371789085 2024-12-16T17:59:01,901 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39733 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7be8b4dcb0e9f81dbd68149495fe8709, server=3609ad07831c,39733,1734371789085 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-16T17:59:01,901 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39733 {}] ipc.CallRunner(138): callId: 34 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:47440 deadline: 1734372001899, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7be8b4dcb0e9f81dbd68149495fe8709, server=3609ad07831c,39733,1734371789085 2024-12-16T17:59:01,901 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39733 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7be8b4dcb0e9f81dbd68149495fe8709, server=3609ad07831c,39733,1734371789085 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-16T17:59:01,901 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39733 {}] ipc.CallRunner(138): callId: 33 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:47476 deadline: 1734372001899, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7be8b4dcb0e9f81dbd68149495fe8709, server=3609ad07831c,39733,1734371789085 2024-12-16T17:59:01,901 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39733 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7be8b4dcb0e9f81dbd68149495fe8709, server=3609ad07831c,39733,1734371789085 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-16T17:59:01,901 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39733 {}] ipc.CallRunner(138): callId: 31 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:47488 deadline: 1734372001899, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7be8b4dcb0e9f81dbd68149495fe8709, server=3609ad07831c,39733,1734371789085 2024-12-16T17:59:02,001 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39733 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7be8b4dcb0e9f81dbd68149495fe8709, server=3609ad07831c,39733,1734371789085 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-16T17:59:02,001 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39733 {}] ipc.CallRunner(138): callId: 35 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:47466 deadline: 1734372002000, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7be8b4dcb0e9f81dbd68149495fe8709, server=3609ad07831c,39733,1734371789085 2024-12-16T17:59:02,003 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39733 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7be8b4dcb0e9f81dbd68149495fe8709, server=3609ad07831c,39733,1734371789085 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-16T17:59:02,003 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39733 {}] ipc.CallRunner(138): callId: 33 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:47450 deadline: 1734372002002, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7be8b4dcb0e9f81dbd68149495fe8709, server=3609ad07831c,39733,1734371789085 2024-12-16T17:59:02,003 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39733 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7be8b4dcb0e9f81dbd68149495fe8709, server=3609ad07831c,39733,1734371789085 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-16T17:59:02,003 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39733 {}] ipc.CallRunner(138): callId: 36 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:47440 deadline: 1734372002002, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7be8b4dcb0e9f81dbd68149495fe8709, server=3609ad07831c,39733,1734371789085 2024-12-16T17:59:02,004 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39733 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7be8b4dcb0e9f81dbd68149495fe8709, server=3609ad07831c,39733,1734371789085 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-16T17:59:02,004 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39733 {}] ipc.CallRunner(138): callId: 35 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:47476 deadline: 1734372002002, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7be8b4dcb0e9f81dbd68149495fe8709, server=3609ad07831c,39733,1734371789085 2024-12-16T17:59:02,004 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39733 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7be8b4dcb0e9f81dbd68149495fe8709, server=3609ad07831c,39733,1734371789085 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-16T17:59:02,004 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39733 {}] ipc.CallRunner(138): callId: 33 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:47488 deadline: 1734372002002, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7be8b4dcb0e9f81dbd68149495fe8709, server=3609ad07831c,39733,1734371789085 2024-12-16T17:59:02,204 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39733 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7be8b4dcb0e9f81dbd68149495fe8709, server=3609ad07831c,39733,1734371789085 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-16T17:59:02,204 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39733 {}] ipc.CallRunner(138): callId: 37 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:47466 deadline: 1734372002203, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7be8b4dcb0e9f81dbd68149495fe8709, server=3609ad07831c,39733,1734371789085 2024-12-16T17:59:02,205 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39733 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7be8b4dcb0e9f81dbd68149495fe8709, server=3609ad07831c,39733,1734371789085 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-16T17:59:02,205 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39733 {}] ipc.CallRunner(138): callId: 35 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:47450 deadline: 1734372002204, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7be8b4dcb0e9f81dbd68149495fe8709, server=3609ad07831c,39733,1734371789085 2024-12-16T17:59:02,205 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39733 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7be8b4dcb0e9f81dbd68149495fe8709, server=3609ad07831c,39733,1734371789085 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-16T17:59:02,206 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39733 {}] ipc.CallRunner(138): callId: 35 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:47488 deadline: 1734372002204, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7be8b4dcb0e9f81dbd68149495fe8709, server=3609ad07831c,39733,1734371789085 2024-12-16T17:59:02,206 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39733 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7be8b4dcb0e9f81dbd68149495fe8709, server=3609ad07831c,39733,1734371789085 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-16T17:59:02,206 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39733 {}] ipc.CallRunner(138): callId: 38 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:47440 deadline: 1734372002205, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7be8b4dcb0e9f81dbd68149495fe8709, server=3609ad07831c,39733,1734371789085 2024-12-16T17:59:02,206 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39733 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7be8b4dcb0e9f81dbd68149495fe8709, server=3609ad07831c,39733,1734371789085 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-16T17:59:02,206 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39733 {}] ipc.CallRunner(138): callId: 37 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:47476 deadline: 1734372002205, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7be8b4dcb0e9f81dbd68149495fe8709, server=3609ad07831c,39733,1734371789085 2024-12-16T17:59:02,298 DEBUG [MemStoreFlusher.0 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-16T17:59:02,301 INFO [MemStoreFlusher.0 {}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241216e53ec42e240e4332a559ef79dda98ad7_7be8b4dcb0e9f81dbd68149495fe8709 to hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241216e53ec42e240e4332a559ef79dda98ad7_7be8b4dcb0e9f81dbd68149495fe8709 2024-12-16T17:59:02,302 DEBUG [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/7be8b4dcb0e9f81dbd68149495fe8709/.tmp/A/2b6f7b9a373844e1a3c4c345a10b83f8, store: [table=TestAcidGuarantees family=A region=7be8b4dcb0e9f81dbd68149495fe8709] 2024-12-16T17:59:02,302 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/7be8b4dcb0e9f81dbd68149495fe8709/.tmp/A/2b6f7b9a373844e1a3c4c345a10b83f8 is 175, key is test_row_0/A:col10/1734371941886/Put/seqid=0 2024-12-16T17:59:02,305 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41817 is added to blk_1073742382_1558 (size=39549) 2024-12-16T17:59:02,508 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39733 {}] regionserver.HRegion(5069): Region is too 
busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7be8b4dcb0e9f81dbd68149495fe8709, server=3609ad07831c,39733,1734371789085 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-16T17:59:02,508 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39733 {}] ipc.CallRunner(138): callId: 39 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:47466 deadline: 1734372002506, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7be8b4dcb0e9f81dbd68149495fe8709, server=3609ad07831c,39733,1734371789085 2024-12-16T17:59:02,508 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39733 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7be8b4dcb0e9f81dbd68149495fe8709, server=3609ad07831c,39733,1734371789085 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-16T17:59:02,508 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39733 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7be8b4dcb0e9f81dbd68149495fe8709, server=3609ad07831c,39733,1734371789085 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-16T17:59:02,508 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39733 {}] ipc.CallRunner(138): callId: 37 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:47450 deadline: 1734372002506, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7be8b4dcb0e9f81dbd68149495fe8709, server=3609ad07831c,39733,1734371789085 2024-12-16T17:59:02,508 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39733 {}] ipc.CallRunner(138): callId: 40 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:47440 deadline: 1734372002507, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7be8b4dcb0e9f81dbd68149495fe8709, server=3609ad07831c,39733,1734371789085 2024-12-16T17:59:02,509 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39733 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7be8b4dcb0e9f81dbd68149495fe8709, server=3609ad07831c,39733,1734371789085 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-16T17:59:02,509 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39733 {}] ipc.CallRunner(138): callId: 37 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:47488 deadline: 1734372002507, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7be8b4dcb0e9f81dbd68149495fe8709, server=3609ad07831c,39733,1734371789085 2024-12-16T17:59:02,509 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39733 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7be8b4dcb0e9f81dbd68149495fe8709, server=3609ad07831c,39733,1734371789085 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-16T17:59:02,509 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39733 {}] ipc.CallRunner(138): callId: 39 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:47476 deadline: 1734372002508, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7be8b4dcb0e9f81dbd68149495fe8709, server=3609ad07831c,39733,1734371789085 2024-12-16T17:59:02,727 INFO [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=57, memsize=31.3 K, hasBloomFilter=true, into tmp file hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/7be8b4dcb0e9f81dbd68149495fe8709/.tmp/A/2b6f7b9a373844e1a3c4c345a10b83f8 2024-12-16T17:59:02,732 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/7be8b4dcb0e9f81dbd68149495fe8709/.tmp/B/0a2ede75b1394d6394eae707882c5bfc is 50, key is test_row_0/B:col10/1734371941886/Put/seqid=0 2024-12-16T17:59:02,735 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41817 is added to blk_1073742383_1559 (size=12001) 2024-12-16T17:59:02,741 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38367 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=151 2024-12-16T17:59:02,741 INFO [Thread-2436 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 151 completed 2024-12-16T17:59:02,742 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38367 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-12-16T17:59:02,742 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38367 {}] procedure2.ProcedureExecutor(1098): Stored pid=153, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=153, table=TestAcidGuarantees 2024-12-16T17:59:02,743 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38367 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=153 2024-12-16T17:59:02,743 INFO [PEWorker-1 {}] procedure.FlushTableProcedure(91): pid=153, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=153, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-12-16T17:59:02,743 INFO [PEWorker-1 {}] procedure.FlushTableProcedure(91): pid=153, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=153, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-12-16T17:59:02,744 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=154, ppid=153, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 
2024-12-16T17:59:02,843 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38367 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=153 2024-12-16T17:59:02,895 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 3609ad07831c,39733,1734371789085 2024-12-16T17:59:02,895 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=39733 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=154 2024-12-16T17:59:02,895 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-1 {event_type=RS_FLUSH_REGIONS, pid=154}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1734371935330.7be8b4dcb0e9f81dbd68149495fe8709. 2024-12-16T17:59:02,895 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-1 {event_type=RS_FLUSH_REGIONS, pid=154}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1734371935330.7be8b4dcb0e9f81dbd68149495fe8709. as already flushing 2024-12-16T17:59:02,895 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-1 {event_type=RS_FLUSH_REGIONS, pid=154}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1734371935330.7be8b4dcb0e9f81dbd68149495fe8709. 2024-12-16T17:59:02,895 ERROR [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-1 {event_type=RS_FLUSH_REGIONS, pid=154}] handler.RSProcedureHandler(58): pid=154 java.io.IOException: Unable to complete flush {ENCODED => 7be8b4dcb0e9f81dbd68149495fe8709, NAME => 'TestAcidGuarantees,,1734371935330.7be8b4dcb0e9f81dbd68149495fe8709.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-16T17:59:02,896 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-1 {event_type=RS_FLUSH_REGIONS, pid=154}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=154 java.io.IOException: Unable to complete flush {ENCODED => 7be8b4dcb0e9f81dbd68149495fe8709, NAME => 'TestAcidGuarantees,,1734371935330.7be8b4dcb0e9f81dbd68149495fe8709.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] 
at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-16T17:59:02,896 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38367 {}] master.HMaster(4114): Remote procedure failed, pid=154 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 7be8b4dcb0e9f81dbd68149495fe8709, NAME => 'TestAcidGuarantees,,1734371935330.7be8b4dcb0e9f81dbd68149495fe8709.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 7be8b4dcb0e9f81dbd68149495fe8709, NAME => 'TestAcidGuarantees,,1734371935330.7be8b4dcb0e9f81dbd68149495fe8709.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-16T17:59:03,010 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39733 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7be8b4dcb0e9f81dbd68149495fe8709, server=3609ad07831c,39733,1734371789085 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-16T17:59:03,010 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39733 {}] ipc.CallRunner(138): callId: 41 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:47466 deadline: 1734372003009, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7be8b4dcb0e9f81dbd68149495fe8709, server=3609ad07831c,39733,1734371789085 2024-12-16T17:59:03,010 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39733 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7be8b4dcb0e9f81dbd68149495fe8709, server=3609ad07831c,39733,1734371789085 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-16T17:59:03,011 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39733 {}] ipc.CallRunner(138): callId: 39 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:47450 deadline: 1734372003009, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7be8b4dcb0e9f81dbd68149495fe8709, server=3609ad07831c,39733,1734371789085 2024-12-16T17:59:03,011 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39733 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7be8b4dcb0e9f81dbd68149495fe8709, server=3609ad07831c,39733,1734371789085 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-16T17:59:03,011 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39733 {}] ipc.CallRunner(138): callId: 39 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:47488 deadline: 1734372003009, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7be8b4dcb0e9f81dbd68149495fe8709, server=3609ad07831c,39733,1734371789085 2024-12-16T17:59:03,013 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39733 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7be8b4dcb0e9f81dbd68149495fe8709, server=3609ad07831c,39733,1734371789085 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-16T17:59:03,013 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39733 {}] ipc.CallRunner(138): callId: 41 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:47476 deadline: 1734372003010, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7be8b4dcb0e9f81dbd68149495fe8709, server=3609ad07831c,39733,1734371789085 2024-12-16T17:59:03,015 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39733 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7be8b4dcb0e9f81dbd68149495fe8709, server=3609ad07831c,39733,1734371789085 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-16T17:59:03,015 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39733 {}] ipc.CallRunner(138): callId: 42 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:47440 deadline: 1734372003013, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7be8b4dcb0e9f81dbd68149495fe8709, server=3609ad07831c,39733,1734371789085 2024-12-16T17:59:03,044 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38367 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=153 2024-12-16T17:59:03,047 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 3609ad07831c,39733,1734371789085 2024-12-16T17:59:03,047 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=39733 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=154 2024-12-16T17:59:03,048 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-2 {event_type=RS_FLUSH_REGIONS, pid=154}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1734371935330.7be8b4dcb0e9f81dbd68149495fe8709. 2024-12-16T17:59:03,048 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-2 {event_type=RS_FLUSH_REGIONS, pid=154}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1734371935330.7be8b4dcb0e9f81dbd68149495fe8709. as already flushing 2024-12-16T17:59:03,048 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-2 {event_type=RS_FLUSH_REGIONS, pid=154}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1734371935330.7be8b4dcb0e9f81dbd68149495fe8709. 2024-12-16T17:59:03,048 ERROR [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-2 {event_type=RS_FLUSH_REGIONS, pid=154}] handler.RSProcedureHandler(58): pid=154 java.io.IOException: Unable to complete flush {ENCODED => 7be8b4dcb0e9f81dbd68149495fe8709, NAME => 'TestAcidGuarantees,,1734371935330.7be8b4dcb0e9f81dbd68149495fe8709.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-16T17:59:03,048 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-2 {event_type=RS_FLUSH_REGIONS, pid=154}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=154 java.io.IOException: Unable to complete flush {ENCODED => 7be8b4dcb0e9f81dbd68149495fe8709, NAME => 'TestAcidGuarantees,,1734371935330.7be8b4dcb0e9f81dbd68149495fe8709.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-16T17:59:03,048 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38367 {}] master.HMaster(4114): Remote procedure failed, pid=154 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 7be8b4dcb0e9f81dbd68149495fe8709, NAME => 'TestAcidGuarantees,,1734371935330.7be8b4dcb0e9f81dbd68149495fe8709.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 7be8b4dcb0e9f81dbd68149495fe8709, NAME => 'TestAcidGuarantees,,1734371935330.7be8b4dcb0e9f81dbd68149495fe8709.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-16T17:59:03,135 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=31.31 KB at sequenceid=57 (bloomFilter=true), to=hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/7be8b4dcb0e9f81dbd68149495fe8709/.tmp/B/0a2ede75b1394d6394eae707882c5bfc 2024-12-16T17:59:03,141 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/7be8b4dcb0e9f81dbd68149495fe8709/.tmp/C/8a1c7458d62a422893df210aa565ad82 is 50, key is test_row_0/C:col10/1734371941886/Put/seqid=0 2024-12-16T17:59:03,144 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41817 is added to blk_1073742384_1560 (size=12001) 2024-12-16T17:59:03,149 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=31.31 KB at sequenceid=57 (bloomFilter=true), to=hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/7be8b4dcb0e9f81dbd68149495fe8709/.tmp/C/8a1c7458d62a422893df210aa565ad82 2024-12-16T17:59:03,152 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/7be8b4dcb0e9f81dbd68149495fe8709/.tmp/A/2b6f7b9a373844e1a3c4c345a10b83f8 as hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/7be8b4dcb0e9f81dbd68149495fe8709/A/2b6f7b9a373844e1a3c4c345a10b83f8 2024-12-16T17:59:03,155 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added 
hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/7be8b4dcb0e9f81dbd68149495fe8709/A/2b6f7b9a373844e1a3c4c345a10b83f8, entries=200, sequenceid=57, filesize=38.6 K 2024-12-16T17:59:03,155 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/7be8b4dcb0e9f81dbd68149495fe8709/.tmp/B/0a2ede75b1394d6394eae707882c5bfc as hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/7be8b4dcb0e9f81dbd68149495fe8709/B/0a2ede75b1394d6394eae707882c5bfc 2024-12-16T17:59:03,158 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/7be8b4dcb0e9f81dbd68149495fe8709/B/0a2ede75b1394d6394eae707882c5bfc, entries=150, sequenceid=57, filesize=11.7 K 2024-12-16T17:59:03,159 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/7be8b4dcb0e9f81dbd68149495fe8709/.tmp/C/8a1c7458d62a422893df210aa565ad82 as hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/7be8b4dcb0e9f81dbd68149495fe8709/C/8a1c7458d62a422893df210aa565ad82 2024-12-16T17:59:03,162 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/7be8b4dcb0e9f81dbd68149495fe8709/C/8a1c7458d62a422893df210aa565ad82, entries=150, sequenceid=57, filesize=11.7 K 2024-12-16T17:59:03,163 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~93.93 KB/96180, heapSize ~246.80 KB/252720, currentSize=120.76 KB/123660 for 7be8b4dcb0e9f81dbd68149495fe8709 in 1276ms, sequenceid=57, compaction requested=true 2024-12-16T17:59:03,163 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 7be8b4dcb0e9f81dbd68149495fe8709: 2024-12-16T17:59:03,163 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 7be8b4dcb0e9f81dbd68149495fe8709:A, priority=-2147483648, current under compaction store size is 1 2024-12-16T17:59:03,163 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-16T17:59:03,163 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 7be8b4dcb0e9f81dbd68149495fe8709:B, priority=-2147483648, current under compaction store size is 2 2024-12-16T17:59:03,163 DEBUG [RS:0;3609ad07831c:39733-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-16T17:59:03,163 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-16T17:59:03,163 DEBUG [RS:0;3609ad07831c:39733-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-16T17:59:03,163 DEBUG [MemStoreFlusher.0 {}] 
regionserver.CompactSplit(403): Add compact mark for store 7be8b4dcb0e9f81dbd68149495fe8709:C, priority=-2147483648, current under compaction store size is 3 2024-12-16T17:59:03,163 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-16T17:59:03,164 DEBUG [RS:0;3609ad07831c:39733-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 101459 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-16T17:59:03,164 DEBUG [RS:0;3609ad07831c:39733-shortCompactions-0 {}] regionserver.HStore(1540): 7be8b4dcb0e9f81dbd68149495fe8709/A is initiating minor compaction (all files) 2024-12-16T17:59:03,164 INFO [RS:0;3609ad07831c:39733-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 7be8b4dcb0e9f81dbd68149495fe8709/A in TestAcidGuarantees,,1734371935330.7be8b4dcb0e9f81dbd68149495fe8709. 2024-12-16T17:59:03,164 INFO [RS:0;3609ad07831c:39733-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/7be8b4dcb0e9f81dbd68149495fe8709/A/1ed1223a6790412e90e76d0b6198fa3d, hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/7be8b4dcb0e9f81dbd68149495fe8709/A/40d17e76e6bb4faca63a3d0ecc29a7a7, hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/7be8b4dcb0e9f81dbd68149495fe8709/A/2b6f7b9a373844e1a3c4c345a10b83f8] into tmpdir=hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/7be8b4dcb0e9f81dbd68149495fe8709/.tmp, totalSize=99.1 K 2024-12-16T17:59:03,164 INFO [RS:0;3609ad07831c:39733-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(181): MOB compaction: major=false isAll=true priority=13 throughput controller=DefaultCompactionThroughputController [maxThroughput=50.00 MB/second, activeCompactions=0] table=TestAcidGuarantees cf=A region=TestAcidGuarantees,,1734371935330.7be8b4dcb0e9f81dbd68149495fe8709. 2024-12-16T17:59:03,164 DEBUG [RS:0;3609ad07831c:39733-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(191): MOB compaction table=TestAcidGuarantees cf=A region=TestAcidGuarantees,,1734371935330.7be8b4dcb0e9f81dbd68149495fe8709. 
files: [hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/7be8b4dcb0e9f81dbd68149495fe8709/A/1ed1223a6790412e90e76d0b6198fa3d, hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/7be8b4dcb0e9f81dbd68149495fe8709/A/40d17e76e6bb4faca63a3d0ecc29a7a7, hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/7be8b4dcb0e9f81dbd68149495fe8709/A/2b6f7b9a373844e1a3c4c345a10b83f8] 2024-12-16T17:59:03,164 DEBUG [RS:0;3609ad07831c:39733-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 36003 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-16T17:59:03,164 DEBUG [RS:0;3609ad07831c:39733-longCompactions-0 {}] regionserver.HStore(1540): 7be8b4dcb0e9f81dbd68149495fe8709/B is initiating minor compaction (all files) 2024-12-16T17:59:03,164 DEBUG [RS:0;3609ad07831c:39733-shortCompactions-0 {}] compactions.Compactor(224): Compacting 1ed1223a6790412e90e76d0b6198fa3d, keycount=150, bloomtype=ROW, size=30.2 K, encoding=NONE, compression=NONE, seqNum=18, earliestPutTs=1734371938640 2024-12-16T17:59:03,164 INFO [RS:0;3609ad07831c:39733-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 7be8b4dcb0e9f81dbd68149495fe8709/B in TestAcidGuarantees,,1734371935330.7be8b4dcb0e9f81dbd68149495fe8709. 2024-12-16T17:59:03,165 INFO [RS:0;3609ad07831c:39733-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/7be8b4dcb0e9f81dbd68149495fe8709/B/76372f8dc86349f585e7c81e2b3353fd, hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/7be8b4dcb0e9f81dbd68149495fe8709/B/d1f75b2789e040c1bc1e999642231d64, hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/7be8b4dcb0e9f81dbd68149495fe8709/B/0a2ede75b1394d6394eae707882c5bfc] into tmpdir=hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/7be8b4dcb0e9f81dbd68149495fe8709/.tmp, totalSize=35.2 K 2024-12-16T17:59:03,165 DEBUG [RS:0;3609ad07831c:39733-shortCompactions-0 {}] compactions.Compactor(224): Compacting 40d17e76e6bb4faca63a3d0ecc29a7a7, keycount=150, bloomtype=ROW, size=30.2 K, encoding=NONE, compression=NONE, seqNum=40, earliestPutTs=1734371938651 2024-12-16T17:59:03,165 DEBUG [RS:0;3609ad07831c:39733-longCompactions-0 {}] compactions.Compactor(224): Compacting 76372f8dc86349f585e7c81e2b3353fd, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=18, earliestPutTs=1734371938640 2024-12-16T17:59:03,165 DEBUG [RS:0;3609ad07831c:39733-shortCompactions-0 {}] compactions.Compactor(224): Compacting 2b6f7b9a373844e1a3c4c345a10b83f8, keycount=200, bloomtype=ROW, size=38.6 K, encoding=NONE, compression=NONE, seqNum=57, earliestPutTs=1734371940775 2024-12-16T17:59:03,165 DEBUG [RS:0;3609ad07831c:39733-longCompactions-0 {}] compactions.Compactor(224): Compacting d1f75b2789e040c1bc1e999642231d64, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=40, earliestPutTs=1734371938651 2024-12-16T17:59:03,166 DEBUG [RS:0;3609ad07831c:39733-longCompactions-0 {}] compactions.Compactor(224): Compacting 
0a2ede75b1394d6394eae707882c5bfc, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=57, earliestPutTs=1734371940775 2024-12-16T17:59:03,170 INFO [RS:0;3609ad07831c:39733-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(322): Compact MOB=false optimized configured=false optimized enabled=false maximum MOB file size=1073741824 major=true store=[table=TestAcidGuarantees family=A region=7be8b4dcb0e9f81dbd68149495fe8709] 2024-12-16T17:59:03,171 INFO [RS:0;3609ad07831c:39733-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 7be8b4dcb0e9f81dbd68149495fe8709#B#compaction#480 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 1 active operations remaining, total limit is 50.00 MB/second 2024-12-16T17:59:03,171 DEBUG [RS:0;3609ad07831c:39733-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/7be8b4dcb0e9f81dbd68149495fe8709/.tmp/B/57e26f80a1c1428fbe8984bb05ab9f26 is 50, key is test_row_0/B:col10/1734371941886/Put/seqid=0 2024-12-16T17:59:03,171 DEBUG [RS:0;3609ad07831c:39733-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(626): New MOB writer created=d41d8cd98f00b204e9800998ecf8427e202412169ec08d4f98284aa99fcdf394c9de6fee_7be8b4dcb0e9f81dbd68149495fe8709 store=[table=TestAcidGuarantees family=A region=7be8b4dcb0e9f81dbd68149495fe8709] 2024-12-16T17:59:03,173 DEBUG [RS:0;3609ad07831c:39733-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(647): Commit or abort size=0 mobCells=0 major=true file=d41d8cd98f00b204e9800998ecf8427e202412169ec08d4f98284aa99fcdf394c9de6fee_7be8b4dcb0e9f81dbd68149495fe8709, store=[table=TestAcidGuarantees family=A region=7be8b4dcb0e9f81dbd68149495fe8709] 2024-12-16T17:59:03,173 DEBUG [RS:0;3609ad07831c:39733-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(658): Aborting writer for hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202412169ec08d4f98284aa99fcdf394c9de6fee_7be8b4dcb0e9f81dbd68149495fe8709 because there are no MOB cells, store=[table=TestAcidGuarantees family=A region=7be8b4dcb0e9f81dbd68149495fe8709] 2024-12-16T17:59:03,174 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41817 is added to blk_1073742385_1561 (size=12104) 2024-12-16T17:59:03,176 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41817 is added to blk_1073742386_1562 (size=4469) 2024-12-16T17:59:03,177 INFO [RS:0;3609ad07831c:39733-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 7be8b4dcb0e9f81dbd68149495fe8709#A#compaction#481 average throughput is 4.07 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-12-16T17:59:03,177 DEBUG [RS:0;3609ad07831c:39733-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/7be8b4dcb0e9f81dbd68149495fe8709/.tmp/A/19c41e20311d46edbbc336facf20d86a is 175, key is test_row_0/A:col10/1734371941886/Put/seqid=0 2024-12-16T17:59:03,177 DEBUG [RS:0;3609ad07831c:39733-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/7be8b4dcb0e9f81dbd68149495fe8709/.tmp/B/57e26f80a1c1428fbe8984bb05ab9f26 as hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/7be8b4dcb0e9f81dbd68149495fe8709/B/57e26f80a1c1428fbe8984bb05ab9f26 2024-12-16T17:59:03,181 INFO [RS:0;3609ad07831c:39733-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 7be8b4dcb0e9f81dbd68149495fe8709/B of 7be8b4dcb0e9f81dbd68149495fe8709 into 57e26f80a1c1428fbe8984bb05ab9f26(size=11.8 K), total size for store is 11.8 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-12-16T17:59:03,181 DEBUG [RS:0;3609ad07831c:39733-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 7be8b4dcb0e9f81dbd68149495fe8709: 2024-12-16T17:59:03,181 INFO [RS:0;3609ad07831c:39733-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1734371935330.7be8b4dcb0e9f81dbd68149495fe8709., storeName=7be8b4dcb0e9f81dbd68149495fe8709/B, priority=13, startTime=1734371943163; duration=0sec 2024-12-16T17:59:03,181 DEBUG [RS:0;3609ad07831c:39733-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-16T17:59:03,181 DEBUG [RS:0;3609ad07831c:39733-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 7be8b4dcb0e9f81dbd68149495fe8709:B 2024-12-16T17:59:03,181 DEBUG [RS:0;3609ad07831c:39733-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-16T17:59:03,184 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41817 is added to blk_1073742387_1563 (size=31058) 2024-12-16T17:59:03,185 DEBUG [RS:0;3609ad07831c:39733-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 36003 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-16T17:59:03,185 DEBUG [RS:0;3609ad07831c:39733-longCompactions-0 {}] regionserver.HStore(1540): 7be8b4dcb0e9f81dbd68149495fe8709/C is initiating minor compaction (all files) 2024-12-16T17:59:03,185 INFO [RS:0;3609ad07831c:39733-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 7be8b4dcb0e9f81dbd68149495fe8709/C in TestAcidGuarantees,,1734371935330.7be8b4dcb0e9f81dbd68149495fe8709. 
2024-12-16T17:59:03,185 INFO [RS:0;3609ad07831c:39733-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/7be8b4dcb0e9f81dbd68149495fe8709/C/235b01cba3ec488ea4d5e39ba1b0dd21, hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/7be8b4dcb0e9f81dbd68149495fe8709/C/637041afafeb4e50a9155f367d4a7358, hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/7be8b4dcb0e9f81dbd68149495fe8709/C/8a1c7458d62a422893df210aa565ad82] into tmpdir=hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/7be8b4dcb0e9f81dbd68149495fe8709/.tmp, totalSize=35.2 K 2024-12-16T17:59:03,185 DEBUG [RS:0;3609ad07831c:39733-longCompactions-0 {}] compactions.Compactor(224): Compacting 235b01cba3ec488ea4d5e39ba1b0dd21, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=18, earliestPutTs=1734371938640 2024-12-16T17:59:03,186 DEBUG [RS:0;3609ad07831c:39733-longCompactions-0 {}] compactions.Compactor(224): Compacting 637041afafeb4e50a9155f367d4a7358, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=40, earliestPutTs=1734371938651 2024-12-16T17:59:03,186 DEBUG [RS:0;3609ad07831c:39733-longCompactions-0 {}] compactions.Compactor(224): Compacting 8a1c7458d62a422893df210aa565ad82, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=57, earliestPutTs=1734371940775 2024-12-16T17:59:03,190 INFO [RS:0;3609ad07831c:39733-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 7be8b4dcb0e9f81dbd68149495fe8709#C#compaction#482 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-12-16T17:59:03,190 DEBUG [RS:0;3609ad07831c:39733-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/7be8b4dcb0e9f81dbd68149495fe8709/.tmp/C/8a80b9aaca434c8a9e7ca7dc01a227ab is 50, key is test_row_0/C:col10/1734371941886/Put/seqid=0 2024-12-16T17:59:03,193 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41817 is added to blk_1073742388_1564 (size=12104) 2024-12-16T17:59:03,199 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 3609ad07831c,39733,1734371789085 2024-12-16T17:59:03,200 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=39733 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=154 2024-12-16T17:59:03,200 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-0 {event_type=RS_FLUSH_REGIONS, pid=154}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1734371935330.7be8b4dcb0e9f81dbd68149495fe8709. 
2024-12-16T17:59:03,200 INFO [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-0 {event_type=RS_FLUSH_REGIONS, pid=154}] regionserver.HRegion(2837): Flushing 7be8b4dcb0e9f81dbd68149495fe8709 3/3 column families, dataSize=120.76 KB heapSize=317.16 KB 2024-12-16T17:59:03,200 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-0 {event_type=RS_FLUSH_REGIONS, pid=154}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 7be8b4dcb0e9f81dbd68149495fe8709, store=A 2024-12-16T17:59:03,200 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-0 {event_type=RS_FLUSH_REGIONS, pid=154}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-16T17:59:03,200 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-0 {event_type=RS_FLUSH_REGIONS, pid=154}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 7be8b4dcb0e9f81dbd68149495fe8709, store=B 2024-12-16T17:59:03,200 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-0 {event_type=RS_FLUSH_REGIONS, pid=154}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-16T17:59:03,200 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-0 {event_type=RS_FLUSH_REGIONS, pid=154}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 7be8b4dcb0e9f81dbd68149495fe8709, store=C 2024-12-16T17:59:03,200 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-0 {event_type=RS_FLUSH_REGIONS, pid=154}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-16T17:59:03,206 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-0 {event_type=RS_FLUSH_REGIONS, pid=154}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202412168c50bceddb0a4fc19183a65b3e9f0ef8_7be8b4dcb0e9f81dbd68149495fe8709 is 50, key is test_row_0/A:col10/1734371941897/Put/seqid=0 2024-12-16T17:59:03,209 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41817 is added to blk_1073742389_1565 (size=12154) 2024-12-16T17:59:03,344 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38367 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=153 2024-12-16T17:59:03,588 DEBUG [RS:0;3609ad07831c:39733-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/7be8b4dcb0e9f81dbd68149495fe8709/.tmp/A/19c41e20311d46edbbc336facf20d86a as hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/7be8b4dcb0e9f81dbd68149495fe8709/A/19c41e20311d46edbbc336facf20d86a 2024-12-16T17:59:03,592 INFO [RS:0;3609ad07831c:39733-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 7be8b4dcb0e9f81dbd68149495fe8709/A of 7be8b4dcb0e9f81dbd68149495fe8709 into 19c41e20311d46edbbc336facf20d86a(size=30.3 K), total size for store is 30.3 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-12-16T17:59:03,592 DEBUG [RS:0;3609ad07831c:39733-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 7be8b4dcb0e9f81dbd68149495fe8709: 2024-12-16T17:59:03,592 INFO [RS:0;3609ad07831c:39733-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1734371935330.7be8b4dcb0e9f81dbd68149495fe8709., storeName=7be8b4dcb0e9f81dbd68149495fe8709/A, priority=13, startTime=1734371943163; duration=0sec 2024-12-16T17:59:03,592 DEBUG [RS:0;3609ad07831c:39733-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-16T17:59:03,592 DEBUG [RS:0;3609ad07831c:39733-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 7be8b4dcb0e9f81dbd68149495fe8709:A 2024-12-16T17:59:03,597 DEBUG [RS:0;3609ad07831c:39733-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/7be8b4dcb0e9f81dbd68149495fe8709/.tmp/C/8a80b9aaca434c8a9e7ca7dc01a227ab as hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/7be8b4dcb0e9f81dbd68149495fe8709/C/8a80b9aaca434c8a9e7ca7dc01a227ab 2024-12-16T17:59:03,601 INFO [RS:0;3609ad07831c:39733-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 7be8b4dcb0e9f81dbd68149495fe8709/C of 7be8b4dcb0e9f81dbd68149495fe8709 into 8a80b9aaca434c8a9e7ca7dc01a227ab(size=11.8 K), total size for store is 11.8 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-12-16T17:59:03,601 DEBUG [RS:0;3609ad07831c:39733-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 7be8b4dcb0e9f81dbd68149495fe8709: 2024-12-16T17:59:03,601 INFO [RS:0;3609ad07831c:39733-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1734371935330.7be8b4dcb0e9f81dbd68149495fe8709., storeName=7be8b4dcb0e9f81dbd68149495fe8709/C, priority=13, startTime=1734371943163; duration=0sec 2024-12-16T17:59:03,601 DEBUG [RS:0;3609ad07831c:39733-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-16T17:59:03,601 DEBUG [RS:0;3609ad07831c:39733-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 7be8b4dcb0e9f81dbd68149495fe8709:C 2024-12-16T17:59:03,610 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-0 {event_type=RS_FLUSH_REGIONS, pid=154}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-16T17:59:03,612 INFO [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-0 {event_type=RS_FLUSH_REGIONS, pid=154}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202412168c50bceddb0a4fc19183a65b3e9f0ef8_7be8b4dcb0e9f81dbd68149495fe8709 to 
hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202412168c50bceddb0a4fc19183a65b3e9f0ef8_7be8b4dcb0e9f81dbd68149495fe8709 2024-12-16T17:59:03,613 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-0 {event_type=RS_FLUSH_REGIONS, pid=154}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/7be8b4dcb0e9f81dbd68149495fe8709/.tmp/A/def54049670243a88e25915f9e683774, store: [table=TestAcidGuarantees family=A region=7be8b4dcb0e9f81dbd68149495fe8709] 2024-12-16T17:59:03,613 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-0 {event_type=RS_FLUSH_REGIONS, pid=154}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/7be8b4dcb0e9f81dbd68149495fe8709/.tmp/A/def54049670243a88e25915f9e683774 is 175, key is test_row_0/A:col10/1734371941897/Put/seqid=0 2024-12-16T17:59:03,616 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41817 is added to blk_1073742390_1566 (size=30955) 2024-12-16T17:59:03,845 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38367 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=153 2024-12-16T17:59:04,012 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1734371935330.7be8b4dcb0e9f81dbd68149495fe8709. as already flushing 2024-12-16T17:59:04,012 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39733 {}] regionserver.HRegion(8581): Flush requested on 7be8b4dcb0e9f81dbd68149495fe8709 2024-12-16T17:59:04,017 INFO [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-0 {event_type=RS_FLUSH_REGIONS, pid=154}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=79, memsize=40.3 K, hasBloomFilter=true, into tmp file hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/7be8b4dcb0e9f81dbd68149495fe8709/.tmp/A/def54049670243a88e25915f9e683774 2024-12-16T17:59:04,022 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-0 {event_type=RS_FLUSH_REGIONS, pid=154}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/7be8b4dcb0e9f81dbd68149495fe8709/.tmp/B/7e85eff050804fe9baeea2d747ca739c is 50, key is test_row_0/B:col10/1734371941897/Put/seqid=0 2024-12-16T17:59:04,024 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39733 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7be8b4dcb0e9f81dbd68149495fe8709, server=3609ad07831c,39733,1734371789085 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-16T17:59:04,024 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39733 {}] ipc.CallRunner(138): callId: 42 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:47450 deadline: 1734372004022, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7be8b4dcb0e9f81dbd68149495fe8709, server=3609ad07831c,39733,1734371789085 2024-12-16T17:59:04,025 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39733 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7be8b4dcb0e9f81dbd68149495fe8709, server=3609ad07831c,39733,1734371789085 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-16T17:59:04,025 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39733 {}] ipc.CallRunner(138): callId: 45 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:47488 deadline: 1734372004022, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7be8b4dcb0e9f81dbd68149495fe8709, server=3609ad07831c,39733,1734371789085 2024-12-16T17:59:04,025 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39733 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7be8b4dcb0e9f81dbd68149495fe8709, server=3609ad07831c,39733,1734371789085 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-16T17:59:04,025 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41817 is added to blk_1073742391_1567 (size=12001) 2024-12-16T17:59:04,025 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39733 {}] ipc.CallRunner(138): callId: 45 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:47440 deadline: 1734372004023, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7be8b4dcb0e9f81dbd68149495fe8709, server=3609ad07831c,39733,1734371789085 2024-12-16T17:59:04,025 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39733 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7be8b4dcb0e9f81dbd68149495fe8709, server=3609ad07831c,39733,1734371789085 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-16T17:59:04,025 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39733 {}] ipc.CallRunner(138): callId: 45 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:47476 deadline: 1734372004023, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7be8b4dcb0e9f81dbd68149495fe8709, server=3609ad07831c,39733,1734371789085 2024-12-16T17:59:04,025 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39733 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7be8b4dcb0e9f81dbd68149495fe8709, server=3609ad07831c,39733,1734371789085 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-16T17:59:04,025 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39733 {}] ipc.CallRunner(138): callId: 48 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:47466 deadline: 1734372004024, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7be8b4dcb0e9f81dbd68149495fe8709, server=3609ad07831c,39733,1734371789085 2024-12-16T17:59:04,126 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39733 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7be8b4dcb0e9f81dbd68149495fe8709, server=3609ad07831c,39733,1734371789085 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-16T17:59:04,126 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39733 {}] ipc.CallRunner(138): callId: 47 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:47488 deadline: 1734372004126, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7be8b4dcb0e9f81dbd68149495fe8709, server=3609ad07831c,39733,1734371789085 2024-12-16T17:59:04,126 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39733 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7be8b4dcb0e9f81dbd68149495fe8709, server=3609ad07831c,39733,1734371789085 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-16T17:59:04,126 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39733 {}] ipc.CallRunner(138): callId: 47 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:47440 deadline: 1734372004126, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7be8b4dcb0e9f81dbd68149495fe8709, server=3609ad07831c,39733,1734371789085 2024-12-16T17:59:04,126 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39733 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7be8b4dcb0e9f81dbd68149495fe8709, server=3609ad07831c,39733,1734371789085 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-16T17:59:04,126 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39733 {}] ipc.CallRunner(138): callId: 47 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:47476 deadline: 1734372004126, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7be8b4dcb0e9f81dbd68149495fe8709, server=3609ad07831c,39733,1734371789085 2024-12-16T17:59:04,127 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39733 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7be8b4dcb0e9f81dbd68149495fe8709, server=3609ad07831c,39733,1734371789085 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-16T17:59:04,127 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39733 {}] ipc.CallRunner(138): callId: 50 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:47466 deadline: 1734372004126, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7be8b4dcb0e9f81dbd68149495fe8709, server=3609ad07831c,39733,1734371789085 2024-12-16T17:59:04,128 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39733 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7be8b4dcb0e9f81dbd68149495fe8709, server=3609ad07831c,39733,1734371789085 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-16T17:59:04,128 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39733 {}] ipc.CallRunner(138): callId: 44 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:47450 deadline: 1734372004127, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7be8b4dcb0e9f81dbd68149495fe8709, server=3609ad07831c,39733,1734371789085 2024-12-16T17:59:04,330 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39733 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7be8b4dcb0e9f81dbd68149495fe8709, server=3609ad07831c,39733,1734371789085 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-16T17:59:04,330 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39733 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7be8b4dcb0e9f81dbd68149495fe8709, server=3609ad07831c,39733,1734371789085 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-16T17:59:04,330 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39733 {}] ipc.CallRunner(138): callId: 49 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:47488 deadline: 1734372004328, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7be8b4dcb0e9f81dbd68149495fe8709, server=3609ad07831c,39733,1734371789085 2024-12-16T17:59:04,330 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39733 {}] ipc.CallRunner(138): callId: 49 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:47440 deadline: 1734372004328, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7be8b4dcb0e9f81dbd68149495fe8709, server=3609ad07831c,39733,1734371789085 2024-12-16T17:59:04,330 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39733 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7be8b4dcb0e9f81dbd68149495fe8709, server=3609ad07831c,39733,1734371789085 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-16T17:59:04,330 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39733 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7be8b4dcb0e9f81dbd68149495fe8709, server=3609ad07831c,39733,1734371789085 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-16T17:59:04,330 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39733 {}] ipc.CallRunner(138): callId: 49 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:47476 deadline: 1734372004328, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7be8b4dcb0e9f81dbd68149495fe8709, server=3609ad07831c,39733,1734371789085 2024-12-16T17:59:04,330 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39733 {}] ipc.CallRunner(138): callId: 52 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:47466 deadline: 1734372004329, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7be8b4dcb0e9f81dbd68149495fe8709, server=3609ad07831c,39733,1734371789085 2024-12-16T17:59:04,331 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39733 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7be8b4dcb0e9f81dbd68149495fe8709, server=3609ad07831c,39733,1734371789085 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-16T17:59:04,331 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39733 {}] ipc.CallRunner(138): callId: 46 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:47450 deadline: 1734372004330, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7be8b4dcb0e9f81dbd68149495fe8709, server=3609ad07831c,39733,1734371789085 2024-12-16T17:59:04,425 INFO [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-0 {event_type=RS_FLUSH_REGIONS, pid=154}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=40.25 KB at sequenceid=79 (bloomFilter=true), to=hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/7be8b4dcb0e9f81dbd68149495fe8709/.tmp/B/7e85eff050804fe9baeea2d747ca739c 2024-12-16T17:59:04,430 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-0 {event_type=RS_FLUSH_REGIONS, pid=154}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/7be8b4dcb0e9f81dbd68149495fe8709/.tmp/C/c5b719d5f681424f8fd79f7e0073543f is 50, key is test_row_0/C:col10/1734371941897/Put/seqid=0 2024-12-16T17:59:04,433 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41817 is added to blk_1073742392_1568 (size=12001) 2024-12-16T17:59:04,632 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39733 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7be8b4dcb0e9f81dbd68149495fe8709, server=3609ad07831c,39733,1734371789085 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-16T17:59:04,632 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39733 {}] ipc.CallRunner(138): callId: 51 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:47488 deadline: 1734372004631, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7be8b4dcb0e9f81dbd68149495fe8709, server=3609ad07831c,39733,1734371789085 2024-12-16T17:59:04,632 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39733 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7be8b4dcb0e9f81dbd68149495fe8709, server=3609ad07831c,39733,1734371789085 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-16T17:59:04,632 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39733 {}] ipc.CallRunner(138): callId: 51 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:47476 deadline: 1734372004631, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7be8b4dcb0e9f81dbd68149495fe8709, server=3609ad07831c,39733,1734371789085 2024-12-16T17:59:04,633 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39733 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7be8b4dcb0e9f81dbd68149495fe8709, server=3609ad07831c,39733,1734371789085 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-16T17:59:04,633 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39733 {}] ipc.CallRunner(138): callId: 51 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:47440 deadline: 1734372004632, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7be8b4dcb0e9f81dbd68149495fe8709, server=3609ad07831c,39733,1734371789085 2024-12-16T17:59:04,634 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39733 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7be8b4dcb0e9f81dbd68149495fe8709, server=3609ad07831c,39733,1734371789085 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-16T17:59:04,634 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39733 {}] ipc.CallRunner(138): callId: 48 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:47450 deadline: 1734372004633, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7be8b4dcb0e9f81dbd68149495fe8709, server=3609ad07831c,39733,1734371789085 2024-12-16T17:59:04,635 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39733 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7be8b4dcb0e9f81dbd68149495fe8709, server=3609ad07831c,39733,1734371789085 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-16T17:59:04,635 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39733 {}] ipc.CallRunner(138): callId: 54 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:47466 deadline: 1734372004633, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7be8b4dcb0e9f81dbd68149495fe8709, server=3609ad07831c,39733,1734371789085 2024-12-16T17:59:04,834 INFO [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-0 {event_type=RS_FLUSH_REGIONS, pid=154}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=40.25 KB at sequenceid=79 (bloomFilter=true), to=hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/7be8b4dcb0e9f81dbd68149495fe8709/.tmp/C/c5b719d5f681424f8fd79f7e0073543f 2024-12-16T17:59:04,837 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-0 {event_type=RS_FLUSH_REGIONS, pid=154}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/7be8b4dcb0e9f81dbd68149495fe8709/.tmp/A/def54049670243a88e25915f9e683774 as hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/7be8b4dcb0e9f81dbd68149495fe8709/A/def54049670243a88e25915f9e683774 2024-12-16T17:59:04,839 INFO [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-0 {event_type=RS_FLUSH_REGIONS, pid=154}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/7be8b4dcb0e9f81dbd68149495fe8709/A/def54049670243a88e25915f9e683774, entries=150, sequenceid=79, filesize=30.2 K 2024-12-16T17:59:04,840 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-0 {event_type=RS_FLUSH_REGIONS, pid=154}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/7be8b4dcb0e9f81dbd68149495fe8709/.tmp/B/7e85eff050804fe9baeea2d747ca739c as 
hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/7be8b4dcb0e9f81dbd68149495fe8709/B/7e85eff050804fe9baeea2d747ca739c 2024-12-16T17:59:04,842 INFO [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-0 {event_type=RS_FLUSH_REGIONS, pid=154}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/7be8b4dcb0e9f81dbd68149495fe8709/B/7e85eff050804fe9baeea2d747ca739c, entries=150, sequenceid=79, filesize=11.7 K 2024-12-16T17:59:04,843 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-0 {event_type=RS_FLUSH_REGIONS, pid=154}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/7be8b4dcb0e9f81dbd68149495fe8709/.tmp/C/c5b719d5f681424f8fd79f7e0073543f as hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/7be8b4dcb0e9f81dbd68149495fe8709/C/c5b719d5f681424f8fd79f7e0073543f 2024-12-16T17:59:04,845 INFO [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-0 {event_type=RS_FLUSH_REGIONS, pid=154}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/7be8b4dcb0e9f81dbd68149495fe8709/C/c5b719d5f681424f8fd79f7e0073543f, entries=150, sequenceid=79, filesize=11.7 K 2024-12-16T17:59:04,846 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38367 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=153 2024-12-16T17:59:04,846 INFO [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-0 {event_type=RS_FLUSH_REGIONS, pid=154}] regionserver.HRegion(3040): Finished flush of dataSize ~120.76 KB/123660, heapSize ~317.11 KB/324720, currentSize=87.22 KB/89310 for 7be8b4dcb0e9f81dbd68149495fe8709 in 1646ms, sequenceid=79, compaction requested=false 2024-12-16T17:59:04,846 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-0 {event_type=RS_FLUSH_REGIONS, pid=154}] regionserver.HRegion(2538): Flush status journal for 7be8b4dcb0e9f81dbd68149495fe8709: 2024-12-16T17:59:04,846 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-0 {event_type=RS_FLUSH_REGIONS, pid=154}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1734371935330.7be8b4dcb0e9f81dbd68149495fe8709. 
2024-12-16T17:59:04,846 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-0 {event_type=RS_FLUSH_REGIONS, pid=154}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=154 2024-12-16T17:59:04,847 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38367 {}] master.HMaster(4106): Remote procedure done, pid=154 2024-12-16T17:59:04,848 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=154, resume processing ppid=153 2024-12-16T17:59:04,848 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=154, ppid=153, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 2.1030 sec 2024-12-16T17:59:04,849 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=153, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=153, table=TestAcidGuarantees in 2.1060 sec 2024-12-16T17:59:05,135 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39733 {}] regionserver.HRegion(8581): Flush requested on 7be8b4dcb0e9f81dbd68149495fe8709 2024-12-16T17:59:05,135 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 7be8b4dcb0e9f81dbd68149495fe8709 3/3 column families, dataSize=93.93 KB heapSize=246.84 KB 2024-12-16T17:59:05,135 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 7be8b4dcb0e9f81dbd68149495fe8709, store=A 2024-12-16T17:59:05,136 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-16T17:59:05,136 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 7be8b4dcb0e9f81dbd68149495fe8709, store=B 2024-12-16T17:59:05,136 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-16T17:59:05,136 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 7be8b4dcb0e9f81dbd68149495fe8709, store=C 2024-12-16T17:59:05,136 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-16T17:59:05,140 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202412169a534a254bc14050b82e9f3d70a07f1e_7be8b4dcb0e9f81dbd68149495fe8709 is 50, key is test_row_0/A:col10/1734371945134/Put/seqid=0 2024-12-16T17:59:05,146 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39733 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7be8b4dcb0e9f81dbd68149495fe8709, server=3609ad07831c,39733,1734371789085 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-16T17:59:05,146 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41817 is added to blk_1073742393_1569 (size=12154) 2024-12-16T17:59:05,146 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39733 {}] ipc.CallRunner(138): callId: 57 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:47440 deadline: 1734372005143, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7be8b4dcb0e9f81dbd68149495fe8709, server=3609ad07831c,39733,1734371789085 2024-12-16T17:59:05,146 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39733 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7be8b4dcb0e9f81dbd68149495fe8709, server=3609ad07831c,39733,1734371789085 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-16T17:59:05,146 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39733 {}] ipc.CallRunner(138): callId: 56 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:47476 deadline: 1734372005144, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7be8b4dcb0e9f81dbd68149495fe8709, server=3609ad07831c,39733,1734371789085 2024-12-16T17:59:05,147 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39733 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7be8b4dcb0e9f81dbd68149495fe8709, server=3609ad07831c,39733,1734371789085 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-16T17:59:05,147 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39733 {}] ipc.CallRunner(138): callId: 53 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:47450 deadline: 1734372005145, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7be8b4dcb0e9f81dbd68149495fe8709, server=3609ad07831c,39733,1734371789085 2024-12-16T17:59:05,147 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39733 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7be8b4dcb0e9f81dbd68149495fe8709, server=3609ad07831c,39733,1734371789085 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-16T17:59:05,147 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39733 {}] ipc.CallRunner(138): callId: 58 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:47488 deadline: 1734372005146, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7be8b4dcb0e9f81dbd68149495fe8709, server=3609ad07831c,39733,1734371789085 2024-12-16T17:59:05,148 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39733 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7be8b4dcb0e9f81dbd68149495fe8709, server=3609ad07831c,39733,1734371789085 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-16T17:59:05,149 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39733 {}] ipc.CallRunner(138): callId: 59 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:47466 deadline: 1734372005147, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7be8b4dcb0e9f81dbd68149495fe8709, server=3609ad07831c,39733,1734371789085 2024-12-16T17:59:05,249 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39733 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7be8b4dcb0e9f81dbd68149495fe8709, server=3609ad07831c,39733,1734371789085 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-16T17:59:05,249 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39733 {}] ipc.CallRunner(138): callId: 59 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:47440 deadline: 1734372005247, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7be8b4dcb0e9f81dbd68149495fe8709, server=3609ad07831c,39733,1734371789085 2024-12-16T17:59:05,249 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39733 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7be8b4dcb0e9f81dbd68149495fe8709, server=3609ad07831c,39733,1734371789085 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-16T17:59:05,249 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39733 {}] ipc.CallRunner(138): callId: 58 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:47476 deadline: 1734372005247, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7be8b4dcb0e9f81dbd68149495fe8709, server=3609ad07831c,39733,1734371789085 2024-12-16T17:59:05,249 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39733 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7be8b4dcb0e9f81dbd68149495fe8709, server=3609ad07831c,39733,1734371789085 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-16T17:59:05,249 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39733 {}] ipc.CallRunner(138): callId: 55 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:47450 deadline: 1734372005247, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7be8b4dcb0e9f81dbd68149495fe8709, server=3609ad07831c,39733,1734371789085 2024-12-16T17:59:05,250 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39733 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7be8b4dcb0e9f81dbd68149495fe8709, server=3609ad07831c,39733,1734371789085 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-16T17:59:05,250 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39733 {}] ipc.CallRunner(138): callId: 60 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:47488 deadline: 1734372005248, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7be8b4dcb0e9f81dbd68149495fe8709, server=3609ad07831c,39733,1734371789085 2024-12-16T17:59:05,251 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39733 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7be8b4dcb0e9f81dbd68149495fe8709, server=3609ad07831c,39733,1734371789085 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-16T17:59:05,251 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39733 {}] ipc.CallRunner(138): callId: 61 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:47466 deadline: 1734372005249, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7be8b4dcb0e9f81dbd68149495fe8709, server=3609ad07831c,39733,1734371789085 2024-12-16T17:59:05,451 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39733 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7be8b4dcb0e9f81dbd68149495fe8709, server=3609ad07831c,39733,1734371789085 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-16T17:59:05,451 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39733 {}] ipc.CallRunner(138): callId: 61 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:47440 deadline: 1734372005450, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7be8b4dcb0e9f81dbd68149495fe8709, server=3609ad07831c,39733,1734371789085 2024-12-16T17:59:05,451 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39733 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7be8b4dcb0e9f81dbd68149495fe8709, server=3609ad07831c,39733,1734371789085 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-16T17:59:05,451 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39733 {}] ipc.CallRunner(138): callId: 60 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:47476 deadline: 1734372005450, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7be8b4dcb0e9f81dbd68149495fe8709, server=3609ad07831c,39733,1734371789085 2024-12-16T17:59:05,452 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39733 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7be8b4dcb0e9f81dbd68149495fe8709, server=3609ad07831c,39733,1734371789085 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-16T17:59:05,452 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39733 {}] ipc.CallRunner(138): callId: 62 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:47488 deadline: 1734372005451, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7be8b4dcb0e9f81dbd68149495fe8709, server=3609ad07831c,39733,1734371789085 2024-12-16T17:59:05,452 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39733 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7be8b4dcb0e9f81dbd68149495fe8709, server=3609ad07831c,39733,1734371789085 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-16T17:59:05,452 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39733 {}] ipc.CallRunner(138): callId: 57 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:47450 deadline: 1734372005451, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7be8b4dcb0e9f81dbd68149495fe8709, server=3609ad07831c,39733,1734371789085 2024-12-16T17:59:05,453 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39733 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7be8b4dcb0e9f81dbd68149495fe8709, server=3609ad07831c,39733,1734371789085 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-16T17:59:05,453 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39733 {}] ipc.CallRunner(138): callId: 63 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:47466 deadline: 1734372005452, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7be8b4dcb0e9f81dbd68149495fe8709, server=3609ad07831c,39733,1734371789085 2024-12-16T17:59:05,547 DEBUG [MemStoreFlusher.0 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-16T17:59:05,550 INFO [MemStoreFlusher.0 {}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202412169a534a254bc14050b82e9f3d70a07f1e_7be8b4dcb0e9f81dbd68149495fe8709 to hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202412169a534a254bc14050b82e9f3d70a07f1e_7be8b4dcb0e9f81dbd68149495fe8709 2024-12-16T17:59:05,550 DEBUG [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/7be8b4dcb0e9f81dbd68149495fe8709/.tmp/A/c17282290f744410b475b078c681d763, store: [table=TestAcidGuarantees family=A region=7be8b4dcb0e9f81dbd68149495fe8709] 2024-12-16T17:59:05,551 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/7be8b4dcb0e9f81dbd68149495fe8709/.tmp/A/c17282290f744410b475b078c681d763 is 175, key is test_row_0/A:col10/1734371945134/Put/seqid=0 2024-12-16T17:59:05,553 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41817 is added to blk_1073742394_1570 (size=30955) 2024-12-16T17:59:05,754 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39733 {}] regionserver.HRegion(5069): Region is too 
busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7be8b4dcb0e9f81dbd68149495fe8709, server=3609ad07831c,39733,1734371789085 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-16T17:59:05,754 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39733 {}] ipc.CallRunner(138): callId: 63 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:47440 deadline: 1734372005753, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7be8b4dcb0e9f81dbd68149495fe8709, server=3609ad07831c,39733,1734371789085 2024-12-16T17:59:05,754 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39733 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7be8b4dcb0e9f81dbd68149495fe8709, server=3609ad07831c,39733,1734371789085 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-16T17:59:05,754 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39733 {}] ipc.CallRunner(138): callId: 62 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:47476 deadline: 1734372005753, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7be8b4dcb0e9f81dbd68149495fe8709, server=3609ad07831c,39733,1734371789085 2024-12-16T17:59:05,754 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39733 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7be8b4dcb0e9f81dbd68149495fe8709, server=3609ad07831c,39733,1734371789085 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-16T17:59:05,755 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39733 {}] ipc.CallRunner(138): callId: 64 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:47488 deadline: 1734372005753, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7be8b4dcb0e9f81dbd68149495fe8709, server=3609ad07831c,39733,1734371789085 2024-12-16T17:59:05,756 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39733 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7be8b4dcb0e9f81dbd68149495fe8709, server=3609ad07831c,39733,1734371789085 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-16T17:59:05,756 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39733 {}] ipc.CallRunner(138): callId: 59 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:47450 deadline: 1734372005755, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7be8b4dcb0e9f81dbd68149495fe8709, server=3609ad07831c,39733,1734371789085 2024-12-16T17:59:05,756 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39733 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7be8b4dcb0e9f81dbd68149495fe8709, server=3609ad07831c,39733,1734371789085 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-16T17:59:05,756 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39733 {}] ipc.CallRunner(138): callId: 65 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:47466 deadline: 1734372005755, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7be8b4dcb0e9f81dbd68149495fe8709, server=3609ad07831c,39733,1734371789085 2024-12-16T17:59:05,954 INFO [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=99, memsize=33.5 K, hasBloomFilter=true, into tmp file hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/7be8b4dcb0e9f81dbd68149495fe8709/.tmp/A/c17282290f744410b475b078c681d763 2024-12-16T17:59:05,959 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/7be8b4dcb0e9f81dbd68149495fe8709/.tmp/B/82d47fafb25a4dd59d3d75c2bb42da90 is 50, key is test_row_0/B:col10/1734371945134/Put/seqid=0 2024-12-16T17:59:05,964 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41817 is added to blk_1073742395_1571 (size=12001) 2024-12-16T17:59:06,255 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39733 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7be8b4dcb0e9f81dbd68149495fe8709, server=3609ad07831c,39733,1734371789085 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-16T17:59:06,256 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39733 {}] ipc.CallRunner(138): callId: 64 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:47476 deadline: 1734372006255, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7be8b4dcb0e9f81dbd68149495fe8709, server=3609ad07831c,39733,1734371789085 2024-12-16T17:59:06,260 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39733 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7be8b4dcb0e9f81dbd68149495fe8709, server=3609ad07831c,39733,1734371789085 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-16T17:59:06,260 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39733 {}] ipc.CallRunner(138): callId: 65 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:47440 deadline: 1734372006259, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7be8b4dcb0e9f81dbd68149495fe8709, server=3609ad07831c,39733,1734371789085 2024-12-16T17:59:06,260 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39733 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7be8b4dcb0e9f81dbd68149495fe8709, server=3609ad07831c,39733,1734371789085 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-16T17:59:06,260 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39733 {}] ipc.CallRunner(138): callId: 61 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:47450 deadline: 1734372006259, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7be8b4dcb0e9f81dbd68149495fe8709, server=3609ad07831c,39733,1734371789085 2024-12-16T17:59:06,260 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39733 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7be8b4dcb0e9f81dbd68149495fe8709, server=3609ad07831c,39733,1734371789085 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-16T17:59:06,260 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39733 {}] ipc.CallRunner(138): callId: 66 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:47488 deadline: 1734372006259, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7be8b4dcb0e9f81dbd68149495fe8709, server=3609ad07831c,39733,1734371789085 2024-12-16T17:59:06,262 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39733 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7be8b4dcb0e9f81dbd68149495fe8709, server=3609ad07831c,39733,1734371789085 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-16T17:59:06,262 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39733 {}] ipc.CallRunner(138): callId: 67 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:47466 deadline: 1734372006261, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7be8b4dcb0e9f81dbd68149495fe8709, server=3609ad07831c,39733,1734371789085 2024-12-16T17:59:06,365 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=33.54 KB at sequenceid=99 (bloomFilter=true), to=hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/7be8b4dcb0e9f81dbd68149495fe8709/.tmp/B/82d47fafb25a4dd59d3d75c2bb42da90 2024-12-16T17:59:06,371 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/7be8b4dcb0e9f81dbd68149495fe8709/.tmp/C/7232039e1bc7415d812fe7c324ea5078 is 50, key is test_row_0/C:col10/1734371945134/Put/seqid=0 2024-12-16T17:59:06,374 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41817 is added to blk_1073742396_1572 (size=12001) 2024-12-16T17:59:06,774 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=33.54 KB at sequenceid=99 (bloomFilter=true), to=hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/7be8b4dcb0e9f81dbd68149495fe8709/.tmp/C/7232039e1bc7415d812fe7c324ea5078 2024-12-16T17:59:06,777 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/7be8b4dcb0e9f81dbd68149495fe8709/.tmp/A/c17282290f744410b475b078c681d763 as hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/7be8b4dcb0e9f81dbd68149495fe8709/A/c17282290f744410b475b078c681d763 2024-12-16T17:59:06,780 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added 
hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/7be8b4dcb0e9f81dbd68149495fe8709/A/c17282290f744410b475b078c681d763, entries=150, sequenceid=99, filesize=30.2 K 2024-12-16T17:59:06,781 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/7be8b4dcb0e9f81dbd68149495fe8709/.tmp/B/82d47fafb25a4dd59d3d75c2bb42da90 as hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/7be8b4dcb0e9f81dbd68149495fe8709/B/82d47fafb25a4dd59d3d75c2bb42da90 2024-12-16T17:59:06,783 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/7be8b4dcb0e9f81dbd68149495fe8709/B/82d47fafb25a4dd59d3d75c2bb42da90, entries=150, sequenceid=99, filesize=11.7 K 2024-12-16T17:59:06,784 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/7be8b4dcb0e9f81dbd68149495fe8709/.tmp/C/7232039e1bc7415d812fe7c324ea5078 as hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/7be8b4dcb0e9f81dbd68149495fe8709/C/7232039e1bc7415d812fe7c324ea5078 2024-12-16T17:59:06,786 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/7be8b4dcb0e9f81dbd68149495fe8709/C/7232039e1bc7415d812fe7c324ea5078, entries=150, sequenceid=99, filesize=11.7 K 2024-12-16T17:59:06,786 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~100.63 KB/103050, heapSize ~264.38 KB/270720, currentSize=107.34 KB/109920 for 7be8b4dcb0e9f81dbd68149495fe8709 in 1651ms, sequenceid=99, compaction requested=true 2024-12-16T17:59:06,786 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 7be8b4dcb0e9f81dbd68149495fe8709: 2024-12-16T17:59:06,787 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 7be8b4dcb0e9f81dbd68149495fe8709:A, priority=-2147483648, current under compaction store size is 1 2024-12-16T17:59:06,787 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-16T17:59:06,787 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 7be8b4dcb0e9f81dbd68149495fe8709:B, priority=-2147483648, current under compaction store size is 2 2024-12-16T17:59:06,787 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-16T17:59:06,787 DEBUG [RS:0;3609ad07831c:39733-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-16T17:59:06,787 DEBUG [RS:0;3609ad07831c:39733-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-16T17:59:06,787 DEBUG [MemStoreFlusher.0 {}] 
regionserver.CompactSplit(403): Add compact mark for store 7be8b4dcb0e9f81dbd68149495fe8709:C, priority=-2147483648, current under compaction store size is 3 2024-12-16T17:59:06,787 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-16T17:59:06,787 DEBUG [RS:0;3609ad07831c:39733-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 36106 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-16T17:59:06,787 DEBUG [RS:0;3609ad07831c:39733-longCompactions-0 {}] regionserver.HStore(1540): 7be8b4dcb0e9f81dbd68149495fe8709/B is initiating minor compaction (all files) 2024-12-16T17:59:06,787 DEBUG [RS:0;3609ad07831c:39733-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 92968 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-16T17:59:06,787 DEBUG [RS:0;3609ad07831c:39733-shortCompactions-0 {}] regionserver.HStore(1540): 7be8b4dcb0e9f81dbd68149495fe8709/A is initiating minor compaction (all files) 2024-12-16T17:59:06,787 INFO [RS:0;3609ad07831c:39733-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 7be8b4dcb0e9f81dbd68149495fe8709/B in TestAcidGuarantees,,1734371935330.7be8b4dcb0e9f81dbd68149495fe8709. 2024-12-16T17:59:06,787 INFO [RS:0;3609ad07831c:39733-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 7be8b4dcb0e9f81dbd68149495fe8709/A in TestAcidGuarantees,,1734371935330.7be8b4dcb0e9f81dbd68149495fe8709. 2024-12-16T17:59:06,788 INFO [RS:0;3609ad07831c:39733-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/7be8b4dcb0e9f81dbd68149495fe8709/B/57e26f80a1c1428fbe8984bb05ab9f26, hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/7be8b4dcb0e9f81dbd68149495fe8709/B/7e85eff050804fe9baeea2d747ca739c, hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/7be8b4dcb0e9f81dbd68149495fe8709/B/82d47fafb25a4dd59d3d75c2bb42da90] into tmpdir=hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/7be8b4dcb0e9f81dbd68149495fe8709/.tmp, totalSize=35.3 K 2024-12-16T17:59:06,788 INFO [RS:0;3609ad07831c:39733-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/7be8b4dcb0e9f81dbd68149495fe8709/A/19c41e20311d46edbbc336facf20d86a, hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/7be8b4dcb0e9f81dbd68149495fe8709/A/def54049670243a88e25915f9e683774, hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/7be8b4dcb0e9f81dbd68149495fe8709/A/c17282290f744410b475b078c681d763] into tmpdir=hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/7be8b4dcb0e9f81dbd68149495fe8709/.tmp, totalSize=90.8 K 2024-12-16T17:59:06,788 INFO [RS:0;3609ad07831c:39733-shortCompactions-0 {}] 
mob.DefaultMobStoreCompactor(181): MOB compaction: major=false isAll=true priority=13 throughput controller=DefaultCompactionThroughputController [maxThroughput=50.00 MB/second, activeCompactions=0] table=TestAcidGuarantees cf=A region=TestAcidGuarantees,,1734371935330.7be8b4dcb0e9f81dbd68149495fe8709. 2024-12-16T17:59:06,788 DEBUG [RS:0;3609ad07831c:39733-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(191): MOB compaction table=TestAcidGuarantees cf=A region=TestAcidGuarantees,,1734371935330.7be8b4dcb0e9f81dbd68149495fe8709. files: [hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/7be8b4dcb0e9f81dbd68149495fe8709/A/19c41e20311d46edbbc336facf20d86a, hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/7be8b4dcb0e9f81dbd68149495fe8709/A/def54049670243a88e25915f9e683774, hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/7be8b4dcb0e9f81dbd68149495fe8709/A/c17282290f744410b475b078c681d763] 2024-12-16T17:59:06,788 DEBUG [RS:0;3609ad07831c:39733-longCompactions-0 {}] compactions.Compactor(224): Compacting 57e26f80a1c1428fbe8984bb05ab9f26, keycount=150, bloomtype=ROW, size=11.8 K, encoding=NONE, compression=NONE, seqNum=57, earliestPutTs=1734371940775 2024-12-16T17:59:06,788 DEBUG [RS:0;3609ad07831c:39733-shortCompactions-0 {}] compactions.Compactor(224): Compacting 19c41e20311d46edbbc336facf20d86a, keycount=150, bloomtype=ROW, size=30.3 K, encoding=NONE, compression=NONE, seqNum=57, earliestPutTs=1734371940775 2024-12-16T17:59:06,788 DEBUG [RS:0;3609ad07831c:39733-longCompactions-0 {}] compactions.Compactor(224): Compacting 7e85eff050804fe9baeea2d747ca739c, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=79, earliestPutTs=1734371941895 2024-12-16T17:59:06,788 DEBUG [RS:0;3609ad07831c:39733-shortCompactions-0 {}] compactions.Compactor(224): Compacting def54049670243a88e25915f9e683774, keycount=150, bloomtype=ROW, size=30.2 K, encoding=NONE, compression=NONE, seqNum=79, earliestPutTs=1734371941895 2024-12-16T17:59:06,788 DEBUG [RS:0;3609ad07831c:39733-longCompactions-0 {}] compactions.Compactor(224): Compacting 82d47fafb25a4dd59d3d75c2bb42da90, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=99, earliestPutTs=1734371944022 2024-12-16T17:59:06,788 DEBUG [RS:0;3609ad07831c:39733-shortCompactions-0 {}] compactions.Compactor(224): Compacting c17282290f744410b475b078c681d763, keycount=150, bloomtype=ROW, size=30.2 K, encoding=NONE, compression=NONE, seqNum=99, earliestPutTs=1734371944022 2024-12-16T17:59:06,792 INFO [RS:0;3609ad07831c:39733-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(322): Compact MOB=false optimized configured=false optimized enabled=false maximum MOB file size=1073741824 major=true store=[table=TestAcidGuarantees family=A region=7be8b4dcb0e9f81dbd68149495fe8709] 2024-12-16T17:59:06,792 INFO [RS:0;3609ad07831c:39733-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 7be8b4dcb0e9f81dbd68149495fe8709#B#compaction#489 average throughput is unlimited, slept 0 time(s) and total slept time is 0 ms. 
1 active operations remaining, total limit is 50.00 MB/second 2024-12-16T17:59:06,793 DEBUG [RS:0;3609ad07831c:39733-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/7be8b4dcb0e9f81dbd68149495fe8709/.tmp/B/dff7b39b0e544f9cac04220be632a910 is 50, key is test_row_0/B:col10/1734371945134/Put/seqid=0 2024-12-16T17:59:06,793 DEBUG [RS:0;3609ad07831c:39733-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(626): New MOB writer created=d41d8cd98f00b204e9800998ecf8427e2024121613bb9e8ff23147a882dfc1581a45eac6_7be8b4dcb0e9f81dbd68149495fe8709 store=[table=TestAcidGuarantees family=A region=7be8b4dcb0e9f81dbd68149495fe8709] 2024-12-16T17:59:06,795 DEBUG [RS:0;3609ad07831c:39733-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(647): Commit or abort size=0 mobCells=0 major=true file=d41d8cd98f00b204e9800998ecf8427e2024121613bb9e8ff23147a882dfc1581a45eac6_7be8b4dcb0e9f81dbd68149495fe8709, store=[table=TestAcidGuarantees family=A region=7be8b4dcb0e9f81dbd68149495fe8709] 2024-12-16T17:59:06,795 DEBUG [RS:0;3609ad07831c:39733-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(658): Aborting writer for hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e2024121613bb9e8ff23147a882dfc1581a45eac6_7be8b4dcb0e9f81dbd68149495fe8709 because there are no MOB cells, store=[table=TestAcidGuarantees family=A region=7be8b4dcb0e9f81dbd68149495fe8709] 2024-12-16T17:59:06,798 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41817 is added to blk_1073742397_1573 (size=12207) 2024-12-16T17:59:06,801 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41817 is added to blk_1073742398_1574 (size=4469) 2024-12-16T17:59:06,802 INFO [RS:0;3609ad07831c:39733-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 7be8b4dcb0e9f81dbd68149495fe8709#A#compaction#490 average throughput is 2.44 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-12-16T17:59:06,803 DEBUG [RS:0;3609ad07831c:39733-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/7be8b4dcb0e9f81dbd68149495fe8709/.tmp/A/32506f7da57d471d8c2c14c2ed856acc is 175, key is test_row_0/A:col10/1734371945134/Put/seqid=0 2024-12-16T17:59:06,806 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41817 is added to blk_1073742399_1575 (size=31161) 2024-12-16T17:59:06,809 DEBUG [RS:0;3609ad07831c:39733-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/7be8b4dcb0e9f81dbd68149495fe8709/.tmp/A/32506f7da57d471d8c2c14c2ed856acc as hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/7be8b4dcb0e9f81dbd68149495fe8709/A/32506f7da57d471d8c2c14c2ed856acc 2024-12-16T17:59:06,812 INFO [RS:0;3609ad07831c:39733-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 7be8b4dcb0e9f81dbd68149495fe8709/A of 7be8b4dcb0e9f81dbd68149495fe8709 into 32506f7da57d471d8c2c14c2ed856acc(size=30.4 K), total size for store is 30.4 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-12-16T17:59:06,812 DEBUG [RS:0;3609ad07831c:39733-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 7be8b4dcb0e9f81dbd68149495fe8709: 2024-12-16T17:59:06,812 INFO [RS:0;3609ad07831c:39733-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1734371935330.7be8b4dcb0e9f81dbd68149495fe8709., storeName=7be8b4dcb0e9f81dbd68149495fe8709/A, priority=13, startTime=1734371946787; duration=0sec 2024-12-16T17:59:06,813 DEBUG [RS:0;3609ad07831c:39733-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-16T17:59:06,813 DEBUG [RS:0;3609ad07831c:39733-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 7be8b4dcb0e9f81dbd68149495fe8709:A 2024-12-16T17:59:06,813 DEBUG [RS:0;3609ad07831c:39733-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-16T17:59:06,813 DEBUG [RS:0;3609ad07831c:39733-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 36106 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-16T17:59:06,813 DEBUG [RS:0;3609ad07831c:39733-shortCompactions-0 {}] regionserver.HStore(1540): 7be8b4dcb0e9f81dbd68149495fe8709/C is initiating minor compaction (all files) 2024-12-16T17:59:06,813 INFO [RS:0;3609ad07831c:39733-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 7be8b4dcb0e9f81dbd68149495fe8709/C in TestAcidGuarantees,,1734371935330.7be8b4dcb0e9f81dbd68149495fe8709. 
2024-12-16T17:59:06,814 INFO [RS:0;3609ad07831c:39733-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/7be8b4dcb0e9f81dbd68149495fe8709/C/8a80b9aaca434c8a9e7ca7dc01a227ab, hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/7be8b4dcb0e9f81dbd68149495fe8709/C/c5b719d5f681424f8fd79f7e0073543f, hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/7be8b4dcb0e9f81dbd68149495fe8709/C/7232039e1bc7415d812fe7c324ea5078] into tmpdir=hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/7be8b4dcb0e9f81dbd68149495fe8709/.tmp, totalSize=35.3 K 2024-12-16T17:59:06,814 DEBUG [RS:0;3609ad07831c:39733-shortCompactions-0 {}] compactions.Compactor(224): Compacting 8a80b9aaca434c8a9e7ca7dc01a227ab, keycount=150, bloomtype=ROW, size=11.8 K, encoding=NONE, compression=NONE, seqNum=57, earliestPutTs=1734371940775 2024-12-16T17:59:06,814 DEBUG [RS:0;3609ad07831c:39733-shortCompactions-0 {}] compactions.Compactor(224): Compacting c5b719d5f681424f8fd79f7e0073543f, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=79, earliestPutTs=1734371941895 2024-12-16T17:59:06,814 DEBUG [RS:0;3609ad07831c:39733-shortCompactions-0 {}] compactions.Compactor(224): Compacting 7232039e1bc7415d812fe7c324ea5078, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=99, earliestPutTs=1734371944022 2024-12-16T17:59:06,819 INFO [RS:0;3609ad07831c:39733-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 7be8b4dcb0e9f81dbd68149495fe8709#C#compaction#491 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-12-16T17:59:06,819 DEBUG [RS:0;3609ad07831c:39733-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/7be8b4dcb0e9f81dbd68149495fe8709/.tmp/C/6437aec0250e447d9b2892637d6adec7 is 50, key is test_row_0/C:col10/1734371945134/Put/seqid=0 2024-12-16T17:59:06,823 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41817 is added to blk_1073742400_1576 (size=12207) 2024-12-16T17:59:06,846 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38367 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=153 2024-12-16T17:59:06,847 INFO [Thread-2436 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 153 completed 2024-12-16T17:59:06,847 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38367 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-12-16T17:59:06,848 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38367 {}] procedure2.ProcedureExecutor(1098): Stored pid=155, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=155, table=TestAcidGuarantees 2024-12-16T17:59:06,848 INFO [PEWorker-4 {}] procedure.FlushTableProcedure(91): pid=155, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=155, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-12-16T17:59:06,848 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38367 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=155 2024-12-16T17:59:06,849 INFO [PEWorker-4 {}] procedure.FlushTableProcedure(91): pid=155, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=155, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-12-16T17:59:06,849 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=156, ppid=155, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-12-16T17:59:06,949 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38367 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=155 2024-12-16T17:59:07,000 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 3609ad07831c,39733,1734371789085 2024-12-16T17:59:07,000 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=39733 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=156 2024-12-16T17:59:07,000 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-1 {event_type=RS_FLUSH_REGIONS, pid=156}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1734371935330.7be8b4dcb0e9f81dbd68149495fe8709. 
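The preceding entries record a client-requested flush of default:TestAcidGuarantees completing as procId 153 and a new FlushTableProcedure (pid=155) being stored on the master. A minimal client-side sketch of issuing such a flush through the Admin API, assuming an otherwise default connection (cluster connection details are assumptions, not taken from this log):

import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

public class FlushTable {
  public static void main(String[] args) throws Exception {
    try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create());
         Admin admin = conn.getAdmin()) {
      // Triggers the master-side flush of the table recorded above and waits
      // for the operation to complete.
      admin.flush(TableName.valueOf("TestAcidGuarantees"));
    }
  }
}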
2024-12-16T17:59:07,000 INFO [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-1 {event_type=RS_FLUSH_REGIONS, pid=156}] regionserver.HRegion(2837): Flushing 7be8b4dcb0e9f81dbd68149495fe8709 3/3 column families, dataSize=107.34 KB heapSize=282 KB 2024-12-16T17:59:07,000 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-1 {event_type=RS_FLUSH_REGIONS, pid=156}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 7be8b4dcb0e9f81dbd68149495fe8709, store=A 2024-12-16T17:59:07,000 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-1 {event_type=RS_FLUSH_REGIONS, pid=156}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-16T17:59:07,000 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-1 {event_type=RS_FLUSH_REGIONS, pid=156}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 7be8b4dcb0e9f81dbd68149495fe8709, store=B 2024-12-16T17:59:07,001 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-1 {event_type=RS_FLUSH_REGIONS, pid=156}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-16T17:59:07,001 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-1 {event_type=RS_FLUSH_REGIONS, pid=156}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 7be8b4dcb0e9f81dbd68149495fe8709, store=C 2024-12-16T17:59:07,001 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-1 {event_type=RS_FLUSH_REGIONS, pid=156}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-16T17:59:07,005 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-1 {event_type=RS_FLUSH_REGIONS, pid=156}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241216a672f28c55ed463aa5c1e94b2935f2f1_7be8b4dcb0e9f81dbd68149495fe8709 is 50, key is test_row_0/A:col10/1734371945143/Put/seqid=0 2024-12-16T17:59:07,011 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41817 is added to blk_1073742401_1577 (size=12154) 2024-12-16T17:59:07,149 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38367 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=155 2024-12-16T17:59:07,202 DEBUG [RS:0;3609ad07831c:39733-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/7be8b4dcb0e9f81dbd68149495fe8709/.tmp/B/dff7b39b0e544f9cac04220be632a910 as hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/7be8b4dcb0e9f81dbd68149495fe8709/B/dff7b39b0e544f9cac04220be632a910 2024-12-16T17:59:07,205 INFO [RS:0;3609ad07831c:39733-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 7be8b4dcb0e9f81dbd68149495fe8709/B of 7be8b4dcb0e9f81dbd68149495fe8709 into dff7b39b0e544f9cac04220be632a910(size=11.9 K), total size for store is 11.9 K. This selection was in queue for 0sec, and took 0sec to execute. 
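The entries that follow show client mutations being rejected with RegionTooBusyException while the region's memstore is over its 512.0 K limit during this flush. A minimal sketch, assuming the stock client retry keys, of the settings a test client typically tunes so such rejections are retried with backoff rather than surfaced immediately; the values shown are illustrative, not taken from this run:

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;

public class ClientRetryConfig {
  public static Configuration build() {
    Configuration conf = HBaseConfiguration.create();
    // Number of times the client retries a failed operation before giving up.
    conf.setInt("hbase.client.retries.number", 15);
    // Base pause (ms) between retries; backoff grows from this value.
    conf.setLong("hbase.client.pause", 100);
    // Overall per-operation timeout (ms) covering all retries.
    conf.setLong("hbase.client.operation.timeout", 120000);
    return conf;
  }
}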
2024-12-16T17:59:07,205 DEBUG [RS:0;3609ad07831c:39733-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 7be8b4dcb0e9f81dbd68149495fe8709: 2024-12-16T17:59:07,205 INFO [RS:0;3609ad07831c:39733-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1734371935330.7be8b4dcb0e9f81dbd68149495fe8709., storeName=7be8b4dcb0e9f81dbd68149495fe8709/B, priority=13, startTime=1734371946787; duration=0sec 2024-12-16T17:59:07,205 DEBUG [RS:0;3609ad07831c:39733-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-16T17:59:07,205 DEBUG [RS:0;3609ad07831c:39733-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 7be8b4dcb0e9f81dbd68149495fe8709:B 2024-12-16T17:59:07,226 DEBUG [RS:0;3609ad07831c:39733-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/7be8b4dcb0e9f81dbd68149495fe8709/.tmp/C/6437aec0250e447d9b2892637d6adec7 as hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/7be8b4dcb0e9f81dbd68149495fe8709/C/6437aec0250e447d9b2892637d6adec7 2024-12-16T17:59:07,229 INFO [RS:0;3609ad07831c:39733-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 7be8b4dcb0e9f81dbd68149495fe8709/C of 7be8b4dcb0e9f81dbd68149495fe8709 into 6437aec0250e447d9b2892637d6adec7(size=11.9 K), total size for store is 11.9 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-12-16T17:59:07,229 DEBUG [RS:0;3609ad07831c:39733-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 7be8b4dcb0e9f81dbd68149495fe8709: 2024-12-16T17:59:07,229 INFO [RS:0;3609ad07831c:39733-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1734371935330.7be8b4dcb0e9f81dbd68149495fe8709., storeName=7be8b4dcb0e9f81dbd68149495fe8709/C, priority=13, startTime=1734371946787; duration=0sec 2024-12-16T17:59:07,229 DEBUG [RS:0;3609ad07831c:39733-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-16T17:59:07,229 DEBUG [RS:0;3609ad07831c:39733-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 7be8b4dcb0e9f81dbd68149495fe8709:C 2024-12-16T17:59:07,262 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1734371935330.7be8b4dcb0e9f81dbd68149495fe8709. as already flushing 2024-12-16T17:59:07,262 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39733 {}] regionserver.HRegion(8581): Flush requested on 7be8b4dcb0e9f81dbd68149495fe8709 2024-12-16T17:59:07,274 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39733 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7be8b4dcb0e9f81dbd68149495fe8709, server=3609ad07831c,39733,1734371789085 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-16T17:59:07,274 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39733 {}] ipc.CallRunner(138): callId: 72 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:47488 deadline: 1734372007272, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7be8b4dcb0e9f81dbd68149495fe8709, server=3609ad07831c,39733,1734371789085 2024-12-16T17:59:07,274 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39733 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7be8b4dcb0e9f81dbd68149495fe8709, server=3609ad07831c,39733,1734371789085 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-16T17:59:07,274 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39733 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7be8b4dcb0e9f81dbd68149495fe8709, server=3609ad07831c,39733,1734371789085 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-16T17:59:07,274 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39733 {}] ipc.CallRunner(138): callId: 70 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:47440 deadline: 1734372007272, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7be8b4dcb0e9f81dbd68149495fe8709, server=3609ad07831c,39733,1734371789085 2024-12-16T17:59:07,274 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39733 {}] ipc.CallRunner(138): callId: 69 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:47466 deadline: 1734372007272, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7be8b4dcb0e9f81dbd68149495fe8709, server=3609ad07831c,39733,1734371789085 2024-12-16T17:59:07,274 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39733 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7be8b4dcb0e9f81dbd68149495fe8709, server=3609ad07831c,39733,1734371789085 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-16T17:59:07,274 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39733 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7be8b4dcb0e9f81dbd68149495fe8709, server=3609ad07831c,39733,1734371789085 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-16T17:59:07,274 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39733 {}] ipc.CallRunner(138): callId: 69 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:47476 deadline: 1734372007273, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7be8b4dcb0e9f81dbd68149495fe8709, server=3609ad07831c,39733,1734371789085 2024-12-16T17:59:07,274 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39733 {}] ipc.CallRunner(138): callId: 68 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:47450 deadline: 1734372007273, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7be8b4dcb0e9f81dbd68149495fe8709, server=3609ad07831c,39733,1734371789085 2024-12-16T17:59:07,375 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39733 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7be8b4dcb0e9f81dbd68149495fe8709, server=3609ad07831c,39733,1734371789085 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-16T17:59:07,376 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39733 {}] ipc.CallRunner(138): callId: 74 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:47488 deadline: 1734372007375, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7be8b4dcb0e9f81dbd68149495fe8709, server=3609ad07831c,39733,1734371789085 2024-12-16T17:59:07,376 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39733 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7be8b4dcb0e9f81dbd68149495fe8709, server=3609ad07831c,39733,1734371789085 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-16T17:59:07,376 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39733 {}] ipc.CallRunner(138): callId: 72 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:47440 deadline: 1734372007375, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7be8b4dcb0e9f81dbd68149495fe8709, server=3609ad07831c,39733,1734371789085 2024-12-16T17:59:07,376 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39733 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7be8b4dcb0e9f81dbd68149495fe8709, server=3609ad07831c,39733,1734371789085 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-16T17:59:07,376 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39733 {}] ipc.CallRunner(138): callId: 70 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:47450 deadline: 1734372007375, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7be8b4dcb0e9f81dbd68149495fe8709, server=3609ad07831c,39733,1734371789085 2024-12-16T17:59:07,376 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39733 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7be8b4dcb0e9f81dbd68149495fe8709, server=3609ad07831c,39733,1734371789085 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-16T17:59:07,376 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39733 {}] ipc.CallRunner(138): callId: 71 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:47476 deadline: 1734372007375, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7be8b4dcb0e9f81dbd68149495fe8709, server=3609ad07831c,39733,1734371789085 2024-12-16T17:59:07,411 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-1 {event_type=RS_FLUSH_REGIONS, pid=156}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-16T17:59:07,414 INFO [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-1 {event_type=RS_FLUSH_REGIONS, pid=156}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241216a672f28c55ed463aa5c1e94b2935f2f1_7be8b4dcb0e9f81dbd68149495fe8709 to hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241216a672f28c55ed463aa5c1e94b2935f2f1_7be8b4dcb0e9f81dbd68149495fe8709 2024-12-16T17:59:07,415 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-1 {event_type=RS_FLUSH_REGIONS, pid=156}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/7be8b4dcb0e9f81dbd68149495fe8709/.tmp/A/654d1d03ff3d4462b2e814e1350d907f, store: [table=TestAcidGuarantees family=A region=7be8b4dcb0e9f81dbd68149495fe8709] 2024-12-16T17:59:07,415 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-1 {event_type=RS_FLUSH_REGIONS, pid=156}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/7be8b4dcb0e9f81dbd68149495fe8709/.tmp/A/654d1d03ff3d4462b2e814e1350d907f is 175, key is test_row_0/A:col10/1734371945143/Put/seqid=0 2024-12-16T17:59:07,418 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41817 is added to blk_1073742402_1578 (size=30955) 2024-12-16T17:59:07,450 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38367 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=155 2024-12-16T17:59:07,579 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39733 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7be8b4dcb0e9f81dbd68149495fe8709, server=3609ad07831c,39733,1734371789085 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-16T17:59:07,580 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39733 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7be8b4dcb0e9f81dbd68149495fe8709, server=3609ad07831c,39733,1734371789085 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-16T17:59:07,580 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39733 {}] ipc.CallRunner(138): callId: 76 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:47488 deadline: 1734372007577, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7be8b4dcb0e9f81dbd68149495fe8709, server=3609ad07831c,39733,1734371789085 2024-12-16T17:59:07,580 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39733 {}] ipc.CallRunner(138): callId: 74 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:47440 deadline: 1734372007578, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7be8b4dcb0e9f81dbd68149495fe8709, server=3609ad07831c,39733,1734371789085 2024-12-16T17:59:07,580 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39733 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7be8b4dcb0e9f81dbd68149495fe8709, server=3609ad07831c,39733,1734371789085 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-16T17:59:07,580 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39733 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7be8b4dcb0e9f81dbd68149495fe8709, server=3609ad07831c,39733,1734371789085 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-16T17:59:07,580 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39733 {}] ipc.CallRunner(138): callId: 72 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:47450 deadline: 1734372007578, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7be8b4dcb0e9f81dbd68149495fe8709, server=3609ad07831c,39733,1734371789085 2024-12-16T17:59:07,580 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39733 {}] ipc.CallRunner(138): callId: 73 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:47476 deadline: 1734372007578, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7be8b4dcb0e9f81dbd68149495fe8709, server=3609ad07831c,39733,1734371789085 2024-12-16T17:59:07,818 INFO [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-1 {event_type=RS_FLUSH_REGIONS, pid=156}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=119, memsize=35.8 K, hasBloomFilter=true, into tmp file hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/7be8b4dcb0e9f81dbd68149495fe8709/.tmp/A/654d1d03ff3d4462b2e814e1350d907f 2024-12-16T17:59:07,824 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-1 {event_type=RS_FLUSH_REGIONS, pid=156}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/7be8b4dcb0e9f81dbd68149495fe8709/.tmp/B/52ea84c84bd64ffc9b1a875bcee0eccc is 50, key is test_row_0/B:col10/1734371945143/Put/seqid=0 2024-12-16T17:59:07,827 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41817 is added to blk_1073742403_1579 (size=12001) 2024-12-16T17:59:07,883 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39733 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7be8b4dcb0e9f81dbd68149495fe8709, server=3609ad07831c,39733,1734371789085 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-16T17:59:07,883 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39733 {}] ipc.CallRunner(138): callId: 75 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:47476 deadline: 1734372007882, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7be8b4dcb0e9f81dbd68149495fe8709, server=3609ad07831c,39733,1734371789085 2024-12-16T17:59:07,883 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39733 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7be8b4dcb0e9f81dbd68149495fe8709, server=3609ad07831c,39733,1734371789085 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-16T17:59:07,883 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39733 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7be8b4dcb0e9f81dbd68149495fe8709, server=3609ad07831c,39733,1734371789085 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-16T17:59:07,883 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39733 {}] ipc.CallRunner(138): callId: 76 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:47440 deadline: 1734372007882, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7be8b4dcb0e9f81dbd68149495fe8709, server=3609ad07831c,39733,1734371789085 2024-12-16T17:59:07,883 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39733 {}] ipc.CallRunner(138): callId: 78 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:47488 deadline: 1734372007882, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7be8b4dcb0e9f81dbd68149495fe8709, server=3609ad07831c,39733,1734371789085 2024-12-16T17:59:07,884 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39733 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7be8b4dcb0e9f81dbd68149495fe8709, server=3609ad07831c,39733,1734371789085 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-16T17:59:07,884 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39733 {}] ipc.CallRunner(138): callId: 74 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:47450 deadline: 1734372007883, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7be8b4dcb0e9f81dbd68149495fe8709, server=3609ad07831c,39733,1734371789085 2024-12-16T17:59:07,951 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38367 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=155 2024-12-16T17:59:08,228 INFO [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-1 {event_type=RS_FLUSH_REGIONS, pid=156}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=35.78 KB at sequenceid=119 (bloomFilter=true), to=hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/7be8b4dcb0e9f81dbd68149495fe8709/.tmp/B/52ea84c84bd64ffc9b1a875bcee0eccc 2024-12-16T17:59:08,233 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-1 {event_type=RS_FLUSH_REGIONS, pid=156}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/7be8b4dcb0e9f81dbd68149495fe8709/.tmp/C/e6396044c5b04173879b029440bc8fc0 is 50, key is test_row_0/C:col10/1734371945143/Put/seqid=0 2024-12-16T17:59:08,235 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41817 is added to blk_1073742404_1580 (size=12001) 2024-12-16T17:59:08,384 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39733 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7be8b4dcb0e9f81dbd68149495fe8709, server=3609ad07831c,39733,1734371789085 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-16T17:59:08,385 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39733 {}] ipc.CallRunner(138): callId: 77 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:47476 deadline: 1734372008384, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7be8b4dcb0e9f81dbd68149495fe8709, server=3609ad07831c,39733,1734371789085 2024-12-16T17:59:08,386 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39733 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7be8b4dcb0e9f81dbd68149495fe8709, server=3609ad07831c,39733,1734371789085 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-16T17:59:08,386 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39733 {}] ipc.CallRunner(138): callId: 78 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:47440 deadline: 1734372008385, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7be8b4dcb0e9f81dbd68149495fe8709, server=3609ad07831c,39733,1734371789085 2024-12-16T17:59:08,389 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39733 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7be8b4dcb0e9f81dbd68149495fe8709, server=3609ad07831c,39733,1734371789085 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-16T17:59:08,389 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39733 {}] ipc.CallRunner(138): callId: 80 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:47488 deadline: 1734372008388, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7be8b4dcb0e9f81dbd68149495fe8709, server=3609ad07831c,39733,1734371789085 2024-12-16T17:59:08,389 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39733 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7be8b4dcb0e9f81dbd68149495fe8709, server=3609ad07831c,39733,1734371789085 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-16T17:59:08,389 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39733 {}] ipc.CallRunner(138): callId: 76 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:47450 deadline: 1734372008388, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7be8b4dcb0e9f81dbd68149495fe8709, server=3609ad07831c,39733,1734371789085 2024-12-16T17:59:08,636 INFO [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-1 {event_type=RS_FLUSH_REGIONS, pid=156}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=35.78 KB at sequenceid=119 (bloomFilter=true), to=hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/7be8b4dcb0e9f81dbd68149495fe8709/.tmp/C/e6396044c5b04173879b029440bc8fc0 2024-12-16T17:59:08,639 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-1 {event_type=RS_FLUSH_REGIONS, pid=156}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/7be8b4dcb0e9f81dbd68149495fe8709/.tmp/A/654d1d03ff3d4462b2e814e1350d907f as hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/7be8b4dcb0e9f81dbd68149495fe8709/A/654d1d03ff3d4462b2e814e1350d907f 2024-12-16T17:59:08,642 INFO [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-1 {event_type=RS_FLUSH_REGIONS, pid=156}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/7be8b4dcb0e9f81dbd68149495fe8709/A/654d1d03ff3d4462b2e814e1350d907f, entries=150, sequenceid=119, filesize=30.2 K 2024-12-16T17:59:08,642 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-1 {event_type=RS_FLUSH_REGIONS, pid=156}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/7be8b4dcb0e9f81dbd68149495fe8709/.tmp/B/52ea84c84bd64ffc9b1a875bcee0eccc as hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/7be8b4dcb0e9f81dbd68149495fe8709/B/52ea84c84bd64ffc9b1a875bcee0eccc 2024-12-16T17:59:08,645 INFO [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-1 {event_type=RS_FLUSH_REGIONS, pid=156}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/7be8b4dcb0e9f81dbd68149495fe8709/B/52ea84c84bd64ffc9b1a875bcee0eccc, entries=150, sequenceid=119, filesize=11.7 K 2024-12-16T17:59:08,645 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-1 {event_type=RS_FLUSH_REGIONS, pid=156}] regionserver.HRegionFileSystem(442): Committing 
hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/7be8b4dcb0e9f81dbd68149495fe8709/.tmp/C/e6396044c5b04173879b029440bc8fc0 as hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/7be8b4dcb0e9f81dbd68149495fe8709/C/e6396044c5b04173879b029440bc8fc0 2024-12-16T17:59:08,648 INFO [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-1 {event_type=RS_FLUSH_REGIONS, pid=156}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/7be8b4dcb0e9f81dbd68149495fe8709/C/e6396044c5b04173879b029440bc8fc0, entries=150, sequenceid=119, filesize=11.7 K 2024-12-16T17:59:08,648 INFO [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-1 {event_type=RS_FLUSH_REGIONS, pid=156}] regionserver.HRegion(3040): Finished flush of dataSize ~107.34 KB/109920, heapSize ~281.95 KB/288720, currentSize=100.63 KB/103050 for 7be8b4dcb0e9f81dbd68149495fe8709 in 1648ms, sequenceid=119, compaction requested=false 2024-12-16T17:59:08,648 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-1 {event_type=RS_FLUSH_REGIONS, pid=156}] regionserver.HRegion(2538): Flush status journal for 7be8b4dcb0e9f81dbd68149495fe8709: 2024-12-16T17:59:08,648 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-1 {event_type=RS_FLUSH_REGIONS, pid=156}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1734371935330.7be8b4dcb0e9f81dbd68149495fe8709. 2024-12-16T17:59:08,648 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-1 {event_type=RS_FLUSH_REGIONS, pid=156}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=156 2024-12-16T17:59:08,649 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38367 {}] master.HMaster(4106): Remote procedure done, pid=156 2024-12-16T17:59:08,650 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=156, resume processing ppid=155 2024-12-16T17:59:08,650 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=156, ppid=155, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 1.8000 sec 2024-12-16T17:59:08,651 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=155, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=155, table=TestAcidGuarantees in 1.8030 sec 2024-12-16T17:59:08,951 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38367 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=155 2024-12-16T17:59:08,952 INFO [Thread-2436 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 155 completed 2024-12-16T17:59:08,953 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38367 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-12-16T17:59:08,953 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38367 {}] procedure2.ProcedureExecutor(1098): Stored pid=157, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=157, table=TestAcidGuarantees 2024-12-16T17:59:08,953 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38367 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=157 2024-12-16T17:59:08,954 INFO [PEWorker-1 {}] procedure.FlushTableProcedure(91): pid=157, 
state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=157, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-12-16T17:59:08,954 INFO [PEWorker-1 {}] procedure.FlushTableProcedure(91): pid=157, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=157, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-12-16T17:59:08,954 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=158, ppid=157, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-12-16T17:59:09,054 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38367 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=157 2024-12-16T17:59:09,105 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 3609ad07831c,39733,1734371789085 2024-12-16T17:59:09,105 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=39733 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=158 2024-12-16T17:59:09,105 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-2 {event_type=RS_FLUSH_REGIONS, pid=158}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1734371935330.7be8b4dcb0e9f81dbd68149495fe8709. 2024-12-16T17:59:09,106 INFO [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-2 {event_type=RS_FLUSH_REGIONS, pid=158}] regionserver.HRegion(2837): Flushing 7be8b4dcb0e9f81dbd68149495fe8709 3/3 column families, dataSize=100.63 KB heapSize=264.42 KB 2024-12-16T17:59:09,106 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-2 {event_type=RS_FLUSH_REGIONS, pid=158}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 7be8b4dcb0e9f81dbd68149495fe8709, store=A 2024-12-16T17:59:09,106 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-2 {event_type=RS_FLUSH_REGIONS, pid=158}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-16T17:59:09,106 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-2 {event_type=RS_FLUSH_REGIONS, pid=158}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 7be8b4dcb0e9f81dbd68149495fe8709, store=B 2024-12-16T17:59:09,106 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-2 {event_type=RS_FLUSH_REGIONS, pid=158}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-16T17:59:09,106 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-2 {event_type=RS_FLUSH_REGIONS, pid=158}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 7be8b4dcb0e9f81dbd68149495fe8709, store=C 2024-12-16T17:59:09,106 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-2 {event_type=RS_FLUSH_REGIONS, pid=158}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-16T17:59:09,112 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-2 {event_type=RS_FLUSH_REGIONS, pid=158}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241216376b8418c6a745599de980258d415bf6_7be8b4dcb0e9f81dbd68149495fe8709 is 50, key is test_row_0/A:col10/1734371947272/Put/seqid=0 2024-12-16T17:59:09,114 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* 
addStoredBlock: 127.0.0.1:41817 is added to blk_1073742405_1581 (size=12304) 2024-12-16T17:59:09,255 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38367 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=157 2024-12-16T17:59:09,292 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39733 {}] regionserver.HRegion(8581): Flush requested on 7be8b4dcb0e9f81dbd68149495fe8709 2024-12-16T17:59:09,292 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1734371935330.7be8b4dcb0e9f81dbd68149495fe8709. as already flushing 2024-12-16T17:59:09,317 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39733 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7be8b4dcb0e9f81dbd68149495fe8709, server=3609ad07831c,39733,1734371789085 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-16T17:59:09,317 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39733 {}] ipc.CallRunner(138): callId: 86 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:47466 deadline: 1734372009317, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7be8b4dcb0e9f81dbd68149495fe8709, server=3609ad07831c,39733,1734371789085 2024-12-16T17:59:09,390 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39733 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7be8b4dcb0e9f81dbd68149495fe8709, server=3609ad07831c,39733,1734371789085 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-16T17:59:09,390 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39733 {}] ipc.CallRunner(138): callId: 80 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:47440 deadline: 1734372009389, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7be8b4dcb0e9f81dbd68149495fe8709, server=3609ad07831c,39733,1734371789085 2024-12-16T17:59:09,393 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39733 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7be8b4dcb0e9f81dbd68149495fe8709, server=3609ad07831c,39733,1734371789085 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-16T17:59:09,393 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39733 {}] ipc.CallRunner(138): callId: 79 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:47476 deadline: 1734372009391, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7be8b4dcb0e9f81dbd68149495fe8709, server=3609ad07831c,39733,1734371789085 2024-12-16T17:59:09,395 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39733 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7be8b4dcb0e9f81dbd68149495fe8709, server=3609ad07831c,39733,1734371789085 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-16T17:59:09,396 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39733 {}] ipc.CallRunner(138): callId: 78 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:47450 deadline: 1734372009394, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7be8b4dcb0e9f81dbd68149495fe8709, server=3609ad07831c,39733,1734371789085 2024-12-16T17:59:09,398 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39733 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7be8b4dcb0e9f81dbd68149495fe8709, server=3609ad07831c,39733,1734371789085 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-16T17:59:09,398 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39733 {}] ipc.CallRunner(138): callId: 82 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:47488 deadline: 1734372009396, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7be8b4dcb0e9f81dbd68149495fe8709, server=3609ad07831c,39733,1734371789085 2024-12-16T17:59:09,419 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39733 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7be8b4dcb0e9f81dbd68149495fe8709, server=3609ad07831c,39733,1734371789085 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-16T17:59:09,419 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39733 {}] ipc.CallRunner(138): callId: 88 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:47466 deadline: 1734372009418, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7be8b4dcb0e9f81dbd68149495fe8709, server=3609ad07831c,39733,1734371789085 2024-12-16T17:59:09,515 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-2 {event_type=RS_FLUSH_REGIONS, pid=158}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-16T17:59:09,518 INFO [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-2 {event_type=RS_FLUSH_REGIONS, pid=158}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241216376b8418c6a745599de980258d415bf6_7be8b4dcb0e9f81dbd68149495fe8709 to hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241216376b8418c6a745599de980258d415bf6_7be8b4dcb0e9f81dbd68149495fe8709 2024-12-16T17:59:09,518 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-2 {event_type=RS_FLUSH_REGIONS, pid=158}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/7be8b4dcb0e9f81dbd68149495fe8709/.tmp/A/e263399596344eb485b3d79af5977c1b, store: [table=TestAcidGuarantees family=A region=7be8b4dcb0e9f81dbd68149495fe8709] 2024-12-16T17:59:09,519 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-2 {event_type=RS_FLUSH_REGIONS, pid=158}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/7be8b4dcb0e9f81dbd68149495fe8709/.tmp/A/e263399596344eb485b3d79af5977c1b is 175, key is test_row_0/A:col10/1734371947272/Put/seqid=0 2024-12-16T17:59:09,522 INFO [Block 
report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41817 is added to blk_1073742406_1582 (size=31105) 2024-12-16T17:59:09,555 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38367 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=157 2024-12-16T17:59:09,622 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39733 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7be8b4dcb0e9f81dbd68149495fe8709, server=3609ad07831c,39733,1734371789085 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-16T17:59:09,622 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39733 {}] ipc.CallRunner(138): callId: 90 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:47466 deadline: 1734372009621, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7be8b4dcb0e9f81dbd68149495fe8709, server=3609ad07831c,39733,1734371789085 2024-12-16T17:59:09,922 INFO [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-2 {event_type=RS_FLUSH_REGIONS, pid=158}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=139, memsize=33.5 K, hasBloomFilter=true, into tmp file hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/7be8b4dcb0e9f81dbd68149495fe8709/.tmp/A/e263399596344eb485b3d79af5977c1b 2024-12-16T17:59:09,925 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39733 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7be8b4dcb0e9f81dbd68149495fe8709, server=3609ad07831c,39733,1734371789085 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-16T17:59:09,925 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39733 {}] ipc.CallRunner(138): callId: 92 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:47466 deadline: 1734372009924, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7be8b4dcb0e9f81dbd68149495fe8709, server=3609ad07831c,39733,1734371789085 2024-12-16T17:59:09,928 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-2 {event_type=RS_FLUSH_REGIONS, pid=158}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/7be8b4dcb0e9f81dbd68149495fe8709/.tmp/B/2880418b33c34a6c8b1c061dcfaec1c4 is 50, key is test_row_0/B:col10/1734371947272/Put/seqid=0 2024-12-16T17:59:09,931 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41817 is added to blk_1073742407_1583 (size=12151) 2024-12-16T17:59:10,056 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38367 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=157 2024-12-16T17:59:10,332 INFO [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-2 {event_type=RS_FLUSH_REGIONS, pid=158}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=33.54 KB at sequenceid=139 (bloomFilter=true), to=hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/7be8b4dcb0e9f81dbd68149495fe8709/.tmp/B/2880418b33c34a6c8b1c061dcfaec1c4 2024-12-16T17:59:10,338 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-2 {event_type=RS_FLUSH_REGIONS, pid=158}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/7be8b4dcb0e9f81dbd68149495fe8709/.tmp/C/b50d9eb0b1c748a597c3f1076057e435 is 50, key is test_row_0/C:col10/1734371947272/Put/seqid=0 2024-12-16T17:59:10,346 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41817 is added to blk_1073742408_1584 (size=12151) 2024-12-16T17:59:10,427 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39733 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7be8b4dcb0e9f81dbd68149495fe8709, server=3609ad07831c,39733,1734371789085 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-16T17:59:10,427 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39733 {}] ipc.CallRunner(138): callId: 94 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:47466 deadline: 1734372010426, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7be8b4dcb0e9f81dbd68149495fe8709, server=3609ad07831c,39733,1734371789085 2024-12-16T17:59:10,747 INFO [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-2 {event_type=RS_FLUSH_REGIONS, pid=158}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=33.54 KB at sequenceid=139 (bloomFilter=true), to=hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/7be8b4dcb0e9f81dbd68149495fe8709/.tmp/C/b50d9eb0b1c748a597c3f1076057e435 2024-12-16T17:59:10,750 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-2 {event_type=RS_FLUSH_REGIONS, pid=158}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/7be8b4dcb0e9f81dbd68149495fe8709/.tmp/A/e263399596344eb485b3d79af5977c1b as hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/7be8b4dcb0e9f81dbd68149495fe8709/A/e263399596344eb485b3d79af5977c1b 2024-12-16T17:59:10,762 INFO [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-2 {event_type=RS_FLUSH_REGIONS, pid=158}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/7be8b4dcb0e9f81dbd68149495fe8709/A/e263399596344eb485b3d79af5977c1b, entries=150, sequenceid=139, filesize=30.4 K 2024-12-16T17:59:10,763 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-2 {event_type=RS_FLUSH_REGIONS, pid=158}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/7be8b4dcb0e9f81dbd68149495fe8709/.tmp/B/2880418b33c34a6c8b1c061dcfaec1c4 as hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/7be8b4dcb0e9f81dbd68149495fe8709/B/2880418b33c34a6c8b1c061dcfaec1c4 2024-12-16T17:59:10,766 INFO [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-2 {event_type=RS_FLUSH_REGIONS, pid=158}] regionserver.HStore$StoreFlusherImpl(1989): Added 
hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/7be8b4dcb0e9f81dbd68149495fe8709/B/2880418b33c34a6c8b1c061dcfaec1c4, entries=150, sequenceid=139, filesize=11.9 K 2024-12-16T17:59:10,767 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-2 {event_type=RS_FLUSH_REGIONS, pid=158}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/7be8b4dcb0e9f81dbd68149495fe8709/.tmp/C/b50d9eb0b1c748a597c3f1076057e435 as hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/7be8b4dcb0e9f81dbd68149495fe8709/C/b50d9eb0b1c748a597c3f1076057e435 2024-12-16T17:59:10,770 INFO [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-2 {event_type=RS_FLUSH_REGIONS, pid=158}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/7be8b4dcb0e9f81dbd68149495fe8709/C/b50d9eb0b1c748a597c3f1076057e435, entries=150, sequenceid=139, filesize=11.9 K 2024-12-16T17:59:10,770 INFO [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-2 {event_type=RS_FLUSH_REGIONS, pid=158}] regionserver.HRegion(3040): Finished flush of dataSize ~100.63 KB/103050, heapSize ~264.38 KB/270720, currentSize=100.63 KB/103050 for 7be8b4dcb0e9f81dbd68149495fe8709 in 1665ms, sequenceid=139, compaction requested=true 2024-12-16T17:59:10,771 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-2 {event_type=RS_FLUSH_REGIONS, pid=158}] regionserver.HRegion(2538): Flush status journal for 7be8b4dcb0e9f81dbd68149495fe8709: 2024-12-16T17:59:10,771 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-2 {event_type=RS_FLUSH_REGIONS, pid=158}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1734371935330.7be8b4dcb0e9f81dbd68149495fe8709. 
2024-12-16T17:59:10,771 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-2 {event_type=RS_FLUSH_REGIONS, pid=158}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=158 2024-12-16T17:59:10,771 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38367 {}] master.HMaster(4106): Remote procedure done, pid=158 2024-12-16T17:59:10,772 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=158, resume processing ppid=157 2024-12-16T17:59:10,772 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=158, ppid=157, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 1.8170 sec 2024-12-16T17:59:10,773 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=157, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=157, table=TestAcidGuarantees in 1.8200 sec 2024-12-16T17:59:11,056 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38367 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=157 2024-12-16T17:59:11,057 INFO [Thread-2436 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 157 completed 2024-12-16T17:59:11,058 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38367 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-12-16T17:59:11,058 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38367 {}] procedure2.ProcedureExecutor(1098): Stored pid=159, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=159, table=TestAcidGuarantees 2024-12-16T17:59:11,058 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38367 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=159 2024-12-16T17:59:11,059 INFO [PEWorker-2 {}] procedure.FlushTableProcedure(91): pid=159, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=159, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-12-16T17:59:11,059 INFO [PEWorker-2 {}] procedure.FlushTableProcedure(91): pid=159, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=159, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-12-16T17:59:11,059 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=160, ppid=159, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-12-16T17:59:11,159 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38367 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=159 2024-12-16T17:59:11,210 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 3609ad07831c,39733,1734371789085 2024-12-16T17:59:11,211 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=39733 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=160 2024-12-16T17:59:11,211 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-0 {event_type=RS_FLUSH_REGIONS, pid=160}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1734371935330.7be8b4dcb0e9f81dbd68149495fe8709. 
2024-12-16T17:59:11,211 INFO [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-0 {event_type=RS_FLUSH_REGIONS, pid=160}] regionserver.HRegion(2837): Flushing 7be8b4dcb0e9f81dbd68149495fe8709 3/3 column families, dataSize=100.63 KB heapSize=264.42 KB 2024-12-16T17:59:11,211 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-0 {event_type=RS_FLUSH_REGIONS, pid=160}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 7be8b4dcb0e9f81dbd68149495fe8709, store=A 2024-12-16T17:59:11,211 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-0 {event_type=RS_FLUSH_REGIONS, pid=160}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-16T17:59:11,211 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-0 {event_type=RS_FLUSH_REGIONS, pid=160}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 7be8b4dcb0e9f81dbd68149495fe8709, store=B 2024-12-16T17:59:11,211 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-0 {event_type=RS_FLUSH_REGIONS, pid=160}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-16T17:59:11,211 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-0 {event_type=RS_FLUSH_REGIONS, pid=160}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 7be8b4dcb0e9f81dbd68149495fe8709, store=C 2024-12-16T17:59:11,211 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-0 {event_type=RS_FLUSH_REGIONS, pid=160}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-16T17:59:11,216 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-0 {event_type=RS_FLUSH_REGIONS, pid=160}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e2024121670914c6b8b1b424ca8b964802312b62f_7be8b4dcb0e9f81dbd68149495fe8709 is 50, key is test_row_0/A:col10/1734371949295/Put/seqid=0 2024-12-16T17:59:11,218 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41817 is added to blk_1073742409_1585 (size=12304) 2024-12-16T17:59:11,360 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38367 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=159 2024-12-16T17:59:11,402 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1734371935330.7be8b4dcb0e9f81dbd68149495fe8709. as already flushing 2024-12-16T17:59:11,402 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39733 {}] regionserver.HRegion(8581): Flush requested on 7be8b4dcb0e9f81dbd68149495fe8709 2024-12-16T17:59:11,419 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39733 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7be8b4dcb0e9f81dbd68149495fe8709, server=3609ad07831c,39733,1734371789085 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-16T17:59:11,419 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39733 {}] ipc.CallRunner(138): callId: 90 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:47488 deadline: 1734372011417, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7be8b4dcb0e9f81dbd68149495fe8709, server=3609ad07831c,39733,1734371789085 2024-12-16T17:59:11,419 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39733 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7be8b4dcb0e9f81dbd68149495fe8709, server=3609ad07831c,39733,1734371789085 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-16T17:59:11,420 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39733 {}] ipc.CallRunner(138): callId: 84 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:47476 deadline: 1734372011418, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7be8b4dcb0e9f81dbd68149495fe8709, server=3609ad07831c,39733,1734371789085 2024-12-16T17:59:11,420 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39733 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7be8b4dcb0e9f81dbd68149495fe8709, server=3609ad07831c,39733,1734371789085 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-16T17:59:11,420 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39733 {}] ipc.CallRunner(138): callId: 82 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:47450 deadline: 1734372011418, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7be8b4dcb0e9f81dbd68149495fe8709, server=3609ad07831c,39733,1734371789085 2024-12-16T17:59:11,421 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39733 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7be8b4dcb0e9f81dbd68149495fe8709, server=3609ad07831c,39733,1734371789085 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-16T17:59:11,421 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39733 {}] ipc.CallRunner(138): callId: 86 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:47440 deadline: 1734372011419, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7be8b4dcb0e9f81dbd68149495fe8709, server=3609ad07831c,39733,1734371789085 2024-12-16T17:59:11,430 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39733 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7be8b4dcb0e9f81dbd68149495fe8709, server=3609ad07831c,39733,1734371789085 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-16T17:59:11,430 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39733 {}] ipc.CallRunner(138): callId: 96 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:47466 deadline: 1734372011429, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7be8b4dcb0e9f81dbd68149495fe8709, server=3609ad07831c,39733,1734371789085 2024-12-16T17:59:11,521 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39733 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7be8b4dcb0e9f81dbd68149495fe8709, server=3609ad07831c,39733,1734371789085 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-16T17:59:11,521 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39733 {}] ipc.CallRunner(138): callId: 92 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:47488 deadline: 1734372011520, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7be8b4dcb0e9f81dbd68149495fe8709, server=3609ad07831c,39733,1734371789085 2024-12-16T17:59:11,521 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39733 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7be8b4dcb0e9f81dbd68149495fe8709, server=3609ad07831c,39733,1734371789085 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-16T17:59:11,522 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39733 {}] ipc.CallRunner(138): callId: 86 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:47476 deadline: 1734372011520, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7be8b4dcb0e9f81dbd68149495fe8709, server=3609ad07831c,39733,1734371789085 2024-12-16T17:59:11,523 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39733 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7be8b4dcb0e9f81dbd68149495fe8709, server=3609ad07831c,39733,1734371789085 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-16T17:59:11,523 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39733 {}] ipc.CallRunner(138): callId: 84 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:47450 deadline: 1734372011521, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7be8b4dcb0e9f81dbd68149495fe8709, server=3609ad07831c,39733,1734371789085 2024-12-16T17:59:11,523 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39733 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7be8b4dcb0e9f81dbd68149495fe8709, server=3609ad07831c,39733,1734371789085 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-16T17:59:11,524 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39733 {}] ipc.CallRunner(138): callId: 88 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:47440 deadline: 1734372011521, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7be8b4dcb0e9f81dbd68149495fe8709, server=3609ad07831c,39733,1734371789085 2024-12-16T17:59:11,619 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-0 {event_type=RS_FLUSH_REGIONS, pid=160}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-16T17:59:11,622 INFO [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-0 {event_type=RS_FLUSH_REGIONS, pid=160}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e2024121670914c6b8b1b424ca8b964802312b62f_7be8b4dcb0e9f81dbd68149495fe8709 to hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e2024121670914c6b8b1b424ca8b964802312b62f_7be8b4dcb0e9f81dbd68149495fe8709 2024-12-16T17:59:11,622 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-0 {event_type=RS_FLUSH_REGIONS, pid=160}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/7be8b4dcb0e9f81dbd68149495fe8709/.tmp/A/fa7cf489b14244e5ad54681da5c328cb, store: [table=TestAcidGuarantees family=A region=7be8b4dcb0e9f81dbd68149495fe8709] 2024-12-16T17:59:11,623 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-0 {event_type=RS_FLUSH_REGIONS, pid=160}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/7be8b4dcb0e9f81dbd68149495fe8709/.tmp/A/fa7cf489b14244e5ad54681da5c328cb is 175, key is test_row_0/A:col10/1734371949295/Put/seqid=0 2024-12-16T17:59:11,626 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41817 is added to blk_1073742410_1586 (size=31105) 2024-12-16T17:59:11,660 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38367 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=159 2024-12-16T17:59:11,724 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39733 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7be8b4dcb0e9f81dbd68149495fe8709, server=3609ad07831c,39733,1734371789085 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-16T17:59:11,725 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39733 {}] ipc.CallRunner(138): callId: 94 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:47488 deadline: 1734372011722, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7be8b4dcb0e9f81dbd68149495fe8709, server=3609ad07831c,39733,1734371789085 2024-12-16T17:59:11,725 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39733 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7be8b4dcb0e9f81dbd68149495fe8709, server=3609ad07831c,39733,1734371789085 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-16T17:59:11,725 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39733 {}] ipc.CallRunner(138): callId: 88 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:47476 deadline: 1734372011723, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7be8b4dcb0e9f81dbd68149495fe8709, server=3609ad07831c,39733,1734371789085 2024-12-16T17:59:11,728 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39733 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7be8b4dcb0e9f81dbd68149495fe8709, server=3609ad07831c,39733,1734371789085 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-16T17:59:11,728 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39733 {}] ipc.CallRunner(138): callId: 86 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:47450 deadline: 1734372011725, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7be8b4dcb0e9f81dbd68149495fe8709, server=3609ad07831c,39733,1734371789085 2024-12-16T17:59:11,728 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39733 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7be8b4dcb0e9f81dbd68149495fe8709, server=3609ad07831c,39733,1734371789085 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-16T17:59:11,728 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39733 {}] ipc.CallRunner(138): callId: 90 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:47440 deadline: 1734372011725, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7be8b4dcb0e9f81dbd68149495fe8709, server=3609ad07831c,39733,1734371789085 2024-12-16T17:59:12,026 INFO [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-0 {event_type=RS_FLUSH_REGIONS, pid=160}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=157, memsize=33.5 K, hasBloomFilter=true, into tmp file hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/7be8b4dcb0e9f81dbd68149495fe8709/.tmp/A/fa7cf489b14244e5ad54681da5c328cb 2024-12-16T17:59:12,028 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39733 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7be8b4dcb0e9f81dbd68149495fe8709, server=3609ad07831c,39733,1734371789085 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-16T17:59:12,028 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39733 {}] ipc.CallRunner(138): callId: 96 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:47488 deadline: 1734372012026, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7be8b4dcb0e9f81dbd68149495fe8709, server=3609ad07831c,39733,1734371789085 2024-12-16T17:59:12,028 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39733 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7be8b4dcb0e9f81dbd68149495fe8709, server=3609ad07831c,39733,1734371789085 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-16T17:59:12,028 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39733 {}] ipc.CallRunner(138): callId: 90 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:47476 deadline: 1734372012027, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7be8b4dcb0e9f81dbd68149495fe8709, server=3609ad07831c,39733,1734371789085 2024-12-16T17:59:12,030 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39733 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7be8b4dcb0e9f81dbd68149495fe8709, server=3609ad07831c,39733,1734371789085 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-16T17:59:12,030 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39733 {}] ipc.CallRunner(138): callId: 92 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:47440 deadline: 1734372012029, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7be8b4dcb0e9f81dbd68149495fe8709, server=3609ad07831c,39733,1734371789085 2024-12-16T17:59:12,031 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39733 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7be8b4dcb0e9f81dbd68149495fe8709, server=3609ad07831c,39733,1734371789085 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-16T17:59:12,031 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39733 {}] ipc.CallRunner(138): callId: 88 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:47450 deadline: 1734372012029, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7be8b4dcb0e9f81dbd68149495fe8709, server=3609ad07831c,39733,1734371789085 2024-12-16T17:59:12,032 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-0 {event_type=RS_FLUSH_REGIONS, pid=160}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/7be8b4dcb0e9f81dbd68149495fe8709/.tmp/B/0ade4a4441da4359a8717f3bdc0b0236 is 50, key is test_row_0/B:col10/1734371949295/Put/seqid=0 2024-12-16T17:59:12,035 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41817 is added to blk_1073742411_1587 (size=12151) 2024-12-16T17:59:12,161 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38367 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=159 2024-12-16T17:59:12,435 INFO [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-0 {event_type=RS_FLUSH_REGIONS, pid=160}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=33.54 KB at sequenceid=157 (bloomFilter=true), to=hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/7be8b4dcb0e9f81dbd68149495fe8709/.tmp/B/0ade4a4441da4359a8717f3bdc0b0236 2024-12-16T17:59:12,441 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-0 {event_type=RS_FLUSH_REGIONS, pid=160}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/7be8b4dcb0e9f81dbd68149495fe8709/.tmp/C/a5a68d4ce2294ce2a90aea89376c0da8 is 50, key is test_row_0/C:col10/1734371949295/Put/seqid=0 2024-12-16T17:59:12,444 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41817 is added to blk_1073742412_1588 (size=12151) 2024-12-16T17:59:12,530 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39733 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7be8b4dcb0e9f81dbd68149495fe8709, server=3609ad07831c,39733,1734371789085 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-16T17:59:12,530 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39733 {}] ipc.CallRunner(138): callId: 92 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:47476 deadline: 1734372012529, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7be8b4dcb0e9f81dbd68149495fe8709, server=3609ad07831c,39733,1734371789085 2024-12-16T17:59:12,533 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39733 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7be8b4dcb0e9f81dbd68149495fe8709, server=3609ad07831c,39733,1734371789085 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-16T17:59:12,534 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39733 {}] ipc.CallRunner(138): callId: 94 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:47440 deadline: 1734372012531, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7be8b4dcb0e9f81dbd68149495fe8709, server=3609ad07831c,39733,1734371789085 2024-12-16T17:59:12,534 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39733 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7be8b4dcb0e9f81dbd68149495fe8709, server=3609ad07831c,39733,1734371789085 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-16T17:59:12,534 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39733 {}] ipc.CallRunner(138): callId: 98 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:47488 deadline: 1734372012533, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7be8b4dcb0e9f81dbd68149495fe8709, server=3609ad07831c,39733,1734371789085 2024-12-16T17:59:12,534 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39733 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7be8b4dcb0e9f81dbd68149495fe8709, server=3609ad07831c,39733,1734371789085 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-16T17:59:12,534 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39733 {}] ipc.CallRunner(138): callId: 90 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:47450 deadline: 1734372012533, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7be8b4dcb0e9f81dbd68149495fe8709, server=3609ad07831c,39733,1734371789085 2024-12-16T17:59:12,845 INFO [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-0 {event_type=RS_FLUSH_REGIONS, pid=160}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=33.54 KB at sequenceid=157 (bloomFilter=true), to=hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/7be8b4dcb0e9f81dbd68149495fe8709/.tmp/C/a5a68d4ce2294ce2a90aea89376c0da8 2024-12-16T17:59:12,869 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-0 {event_type=RS_FLUSH_REGIONS, pid=160}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/7be8b4dcb0e9f81dbd68149495fe8709/.tmp/A/fa7cf489b14244e5ad54681da5c328cb as hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/7be8b4dcb0e9f81dbd68149495fe8709/A/fa7cf489b14244e5ad54681da5c328cb 2024-12-16T17:59:12,872 INFO [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-0 {event_type=RS_FLUSH_REGIONS, pid=160}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/7be8b4dcb0e9f81dbd68149495fe8709/A/fa7cf489b14244e5ad54681da5c328cb, entries=150, sequenceid=157, filesize=30.4 K 2024-12-16T17:59:12,873 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-0 {event_type=RS_FLUSH_REGIONS, pid=160}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/7be8b4dcb0e9f81dbd68149495fe8709/.tmp/B/0ade4a4441da4359a8717f3bdc0b0236 as hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/7be8b4dcb0e9f81dbd68149495fe8709/B/0ade4a4441da4359a8717f3bdc0b0236 2024-12-16T17:59:12,875 INFO [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-0 {event_type=RS_FLUSH_REGIONS, pid=160}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/7be8b4dcb0e9f81dbd68149495fe8709/B/0ade4a4441da4359a8717f3bdc0b0236, entries=150, sequenceid=157, filesize=11.9 K 2024-12-16T17:59:12,876 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-0 {event_type=RS_FLUSH_REGIONS, pid=160}] regionserver.HRegionFileSystem(442): Committing 
hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/7be8b4dcb0e9f81dbd68149495fe8709/.tmp/C/a5a68d4ce2294ce2a90aea89376c0da8 as hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/7be8b4dcb0e9f81dbd68149495fe8709/C/a5a68d4ce2294ce2a90aea89376c0da8 2024-12-16T17:59:12,878 INFO [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-0 {event_type=RS_FLUSH_REGIONS, pid=160}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/7be8b4dcb0e9f81dbd68149495fe8709/C/a5a68d4ce2294ce2a90aea89376c0da8, entries=150, sequenceid=157, filesize=11.9 K 2024-12-16T17:59:12,878 INFO [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-0 {event_type=RS_FLUSH_REGIONS, pid=160}] regionserver.HRegion(3040): Finished flush of dataSize ~100.63 KB/103050, heapSize ~264.38 KB/270720, currentSize=100.63 KB/103050 for 7be8b4dcb0e9f81dbd68149495fe8709 in 1667ms, sequenceid=157, compaction requested=true 2024-12-16T17:59:12,879 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-0 {event_type=RS_FLUSH_REGIONS, pid=160}] regionserver.HRegion(2538): Flush status journal for 7be8b4dcb0e9f81dbd68149495fe8709: 2024-12-16T17:59:12,879 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-0 {event_type=RS_FLUSH_REGIONS, pid=160}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1734371935330.7be8b4dcb0e9f81dbd68149495fe8709. 2024-12-16T17:59:12,879 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-0 {event_type=RS_FLUSH_REGIONS, pid=160}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=160 2024-12-16T17:59:12,879 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38367 {}] master.HMaster(4106): Remote procedure done, pid=160 2024-12-16T17:59:12,880 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=160, resume processing ppid=159 2024-12-16T17:59:12,881 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=160, ppid=159, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 1.8210 sec 2024-12-16T17:59:12,881 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=159, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=159, table=TestAcidGuarantees in 1.8230 sec 2024-12-16T17:59:13,161 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38367 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=159 2024-12-16T17:59:13,162 INFO [Thread-2436 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 159 completed 2024-12-16T17:59:13,163 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38367 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-12-16T17:59:13,163 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38367 {}] procedure2.ProcedureExecutor(1098): Stored pid=161, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=161, table=TestAcidGuarantees 2024-12-16T17:59:13,164 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38367 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=161 2024-12-16T17:59:13,164 INFO [PEWorker-3 {}] procedure.FlushTableProcedure(91): pid=161, 
state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=161, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-12-16T17:59:13,164 INFO [PEWorker-3 {}] procedure.FlushTableProcedure(91): pid=161, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=161, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-12-16T17:59:13,164 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=162, ppid=161, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-12-16T17:59:13,264 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38367 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=161 2024-12-16T17:59:13,315 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 3609ad07831c,39733,1734371789085 2024-12-16T17:59:13,316 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=39733 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=162 2024-12-16T17:59:13,316 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-1 {event_type=RS_FLUSH_REGIONS, pid=162}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1734371935330.7be8b4dcb0e9f81dbd68149495fe8709. 2024-12-16T17:59:13,316 INFO [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-1 {event_type=RS_FLUSH_REGIONS, pid=162}] regionserver.HRegion(2837): Flushing 7be8b4dcb0e9f81dbd68149495fe8709 3/3 column families, dataSize=100.63 KB heapSize=264.42 KB 2024-12-16T17:59:13,316 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-1 {event_type=RS_FLUSH_REGIONS, pid=162}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 7be8b4dcb0e9f81dbd68149495fe8709, store=A 2024-12-16T17:59:13,316 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-1 {event_type=RS_FLUSH_REGIONS, pid=162}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-16T17:59:13,316 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-1 {event_type=RS_FLUSH_REGIONS, pid=162}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 7be8b4dcb0e9f81dbd68149495fe8709, store=B 2024-12-16T17:59:13,316 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-1 {event_type=RS_FLUSH_REGIONS, pid=162}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-16T17:59:13,316 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-1 {event_type=RS_FLUSH_REGIONS, pid=162}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 7be8b4dcb0e9f81dbd68149495fe8709, store=C 2024-12-16T17:59:13,316 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-1 {event_type=RS_FLUSH_REGIONS, pid=162}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-16T17:59:13,321 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-1 {event_type=RS_FLUSH_REGIONS, pid=162}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202412168d86f4082ed7470485a94a4763b639e6_7be8b4dcb0e9f81dbd68149495fe8709 is 50, key is test_row_0/A:col10/1734371951417/Put/seqid=0 2024-12-16T17:59:13,324 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* 
addStoredBlock: 127.0.0.1:41817 is added to blk_1073742413_1589 (size=12304) 2024-12-16T17:59:13,444 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39733 {}] regionserver.HRegion(8581): Flush requested on 7be8b4dcb0e9f81dbd68149495fe8709 2024-12-16T17:59:13,444 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1734371935330.7be8b4dcb0e9f81dbd68149495fe8709. as already flushing 2024-12-16T17:59:13,465 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38367 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=161 2024-12-16T17:59:13,480 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39733 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7be8b4dcb0e9f81dbd68149495fe8709, server=3609ad07831c,39733,1734371789085 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-16T17:59:13,480 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39733 {}] ipc.CallRunner(138): callId: 113 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:47466 deadline: 1734372013478, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7be8b4dcb0e9f81dbd68149495fe8709, server=3609ad07831c,39733,1734371789085 2024-12-16T17:59:13,535 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39733 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7be8b4dcb0e9f81dbd68149495fe8709, server=3609ad07831c,39733,1734371789085 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-16T17:59:13,535 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39733 {}] ipc.CallRunner(138): callId: 94 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:47476 deadline: 1734372013534, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7be8b4dcb0e9f81dbd68149495fe8709, server=3609ad07831c,39733,1734371789085 2024-12-16T17:59:13,539 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39733 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7be8b4dcb0e9f81dbd68149495fe8709, server=3609ad07831c,39733,1734371789085 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-16T17:59:13,540 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39733 {}] ipc.CallRunner(138): callId: 100 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:47488 deadline: 1734372013538, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7be8b4dcb0e9f81dbd68149495fe8709, server=3609ad07831c,39733,1734371789085 2024-12-16T17:59:13,541 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39733 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7be8b4dcb0e9f81dbd68149495fe8709, server=3609ad07831c,39733,1734371789085 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-16T17:59:13,541 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39733 {}] ipc.CallRunner(138): callId: 96 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:47440 deadline: 1734372013540, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7be8b4dcb0e9f81dbd68149495fe8709, server=3609ad07831c,39733,1734371789085 2024-12-16T17:59:13,545 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39733 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7be8b4dcb0e9f81dbd68149495fe8709, server=3609ad07831c,39733,1734371789085 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-16T17:59:13,545 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39733 {}] ipc.CallRunner(138): callId: 92 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:47450 deadline: 1734372013544, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7be8b4dcb0e9f81dbd68149495fe8709, server=3609ad07831c,39733,1734371789085 2024-12-16T17:59:13,582 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39733 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7be8b4dcb0e9f81dbd68149495fe8709, server=3609ad07831c,39733,1734371789085 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-16T17:59:13,583 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39733 {}] ipc.CallRunner(138): callId: 115 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:47466 deadline: 1734372013581, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7be8b4dcb0e9f81dbd68149495fe8709, server=3609ad07831c,39733,1734371789085 2024-12-16T17:59:13,725 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-1 {event_type=RS_FLUSH_REGIONS, pid=162}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-16T17:59:13,728 INFO [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-1 {event_type=RS_FLUSH_REGIONS, pid=162}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202412168d86f4082ed7470485a94a4763b639e6_7be8b4dcb0e9f81dbd68149495fe8709 to hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202412168d86f4082ed7470485a94a4763b639e6_7be8b4dcb0e9f81dbd68149495fe8709 2024-12-16T17:59:13,728 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-1 {event_type=RS_FLUSH_REGIONS, pid=162}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/7be8b4dcb0e9f81dbd68149495fe8709/.tmp/A/67cac75738674822b7f260b132d6d124, store: [table=TestAcidGuarantees family=A region=7be8b4dcb0e9f81dbd68149495fe8709] 2024-12-16T17:59:13,729 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-1 {event_type=RS_FLUSH_REGIONS, pid=162}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/7be8b4dcb0e9f81dbd68149495fe8709/.tmp/A/67cac75738674822b7f260b132d6d124 is 175, key is test_row_0/A:col10/1734371951417/Put/seqid=0 2024-12-16T17:59:13,731 INFO [Block 
report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41817 is added to blk_1073742414_1590 (size=31105) 2024-12-16T17:59:13,766 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38367 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=161 2024-12-16T17:59:13,784 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39733 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7be8b4dcb0e9f81dbd68149495fe8709, server=3609ad07831c,39733,1734371789085 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-16T17:59:13,785 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39733 {}] ipc.CallRunner(138): callId: 117 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:47466 deadline: 1734372013783, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7be8b4dcb0e9f81dbd68149495fe8709, server=3609ad07831c,39733,1734371789085 2024-12-16T17:59:14,087 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39733 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7be8b4dcb0e9f81dbd68149495fe8709, server=3609ad07831c,39733,1734371789085 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-16T17:59:14,087 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39733 {}] ipc.CallRunner(138): callId: 119 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:47466 deadline: 1734372014086, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7be8b4dcb0e9f81dbd68149495fe8709, server=3609ad07831c,39733,1734371789085 2024-12-16T17:59:14,132 INFO [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-1 {event_type=RS_FLUSH_REGIONS, pid=162}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=175, memsize=33.5 K, hasBloomFilter=true, into tmp file hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/7be8b4dcb0e9f81dbd68149495fe8709/.tmp/A/67cac75738674822b7f260b132d6d124 2024-12-16T17:59:14,137 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-1 {event_type=RS_FLUSH_REGIONS, pid=162}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/7be8b4dcb0e9f81dbd68149495fe8709/.tmp/B/c139640199c142b280fe69e74f662024 is 50, key is test_row_0/B:col10/1734371951417/Put/seqid=0 2024-12-16T17:59:14,140 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41817 is added to blk_1073742415_1591 (size=12151) 2024-12-16T17:59:14,266 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38367 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=161 2024-12-16T17:59:14,541 INFO [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-1 {event_type=RS_FLUSH_REGIONS, pid=162}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=33.54 KB at sequenceid=175 (bloomFilter=true), to=hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/7be8b4dcb0e9f81dbd68149495fe8709/.tmp/B/c139640199c142b280fe69e74f662024 2024-12-16T17:59:14,546 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-1 {event_type=RS_FLUSH_REGIONS, pid=162}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/7be8b4dcb0e9f81dbd68149495fe8709/.tmp/C/6c589bc5aa3b4632a21b38ec1f59eb14 is 50, key is test_row_0/C:col10/1734371951417/Put/seqid=0 2024-12-16T17:59:14,549 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41817 is added to blk_1073742416_1592 (size=12151) 2024-12-16T17:59:14,592 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39733 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7be8b4dcb0e9f81dbd68149495fe8709, server=3609ad07831c,39733,1734371789085 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-16T17:59:14,592 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39733 {}] ipc.CallRunner(138): callId: 121 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:47466 deadline: 1734372014590, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7be8b4dcb0e9f81dbd68149495fe8709, server=3609ad07831c,39733,1734371789085 2024-12-16T17:59:14,950 INFO [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-1 {event_type=RS_FLUSH_REGIONS, pid=162}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=33.54 KB at sequenceid=175 (bloomFilter=true), to=hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/7be8b4dcb0e9f81dbd68149495fe8709/.tmp/C/6c589bc5aa3b4632a21b38ec1f59eb14 2024-12-16T17:59:14,953 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-1 {event_type=RS_FLUSH_REGIONS, pid=162}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/7be8b4dcb0e9f81dbd68149495fe8709/.tmp/A/67cac75738674822b7f260b132d6d124 as hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/7be8b4dcb0e9f81dbd68149495fe8709/A/67cac75738674822b7f260b132d6d124 2024-12-16T17:59:14,956 INFO [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-1 {event_type=RS_FLUSH_REGIONS, pid=162}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/7be8b4dcb0e9f81dbd68149495fe8709/A/67cac75738674822b7f260b132d6d124, entries=150, sequenceid=175, filesize=30.4 K 2024-12-16T17:59:14,956 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-1 {event_type=RS_FLUSH_REGIONS, pid=162}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/7be8b4dcb0e9f81dbd68149495fe8709/.tmp/B/c139640199c142b280fe69e74f662024 as 
hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/7be8b4dcb0e9f81dbd68149495fe8709/B/c139640199c142b280fe69e74f662024 2024-12-16T17:59:14,958 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39733 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-16T17:59:14,958 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39733 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-16T17:59:14,959 INFO [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-1 {event_type=RS_FLUSH_REGIONS, pid=162}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/7be8b4dcb0e9f81dbd68149495fe8709/B/c139640199c142b280fe69e74f662024, entries=150, sequenceid=175, filesize=11.9 K 2024-12-16T17:59:14,960 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-1 {event_type=RS_FLUSH_REGIONS, pid=162}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/7be8b4dcb0e9f81dbd68149495fe8709/.tmp/C/6c589bc5aa3b4632a21b38ec1f59eb14 as hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/7be8b4dcb0e9f81dbd68149495fe8709/C/6c589bc5aa3b4632a21b38ec1f59eb14 2024-12-16T17:59:14,960 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39733 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-16T17:59:14,960 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39733 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-16T17:59:14,961 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39733 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-16T17:59:14,961 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39733 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-16T17:59:14,961 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39733 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-16T17:59:14,961 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39733 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-16T17:59:14,962 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39733 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-16T17:59:14,962 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39733 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating 
StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-16T17:59:14,962 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39733 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-16T17:59:14,962 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39733 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-16T17:59:14,963 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39733 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-16T17:59:14,963 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39733 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-16T17:59:14,963 INFO [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-1 {event_type=RS_FLUSH_REGIONS, pid=162}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/7be8b4dcb0e9f81dbd68149495fe8709/C/6c589bc5aa3b4632a21b38ec1f59eb14, entries=150, sequenceid=175, filesize=11.9 K 2024-12-16T17:59:14,963 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39733 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-16T17:59:14,963 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39733 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-16T17:59:14,963 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39733 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-16T17:59:14,963 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39733 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-16T17:59:14,964 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39733 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-16T17:59:14,964 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39733 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-16T17:59:14,964 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39733 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-16T17:59:14,964 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39733 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 
2024-12-16T17:59:14,965 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39733 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-16T17:59:14,965 INFO [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-1 {event_type=RS_FLUSH_REGIONS, pid=162}] regionserver.HRegion(3040): Finished flush of dataSize ~100.63 KB/103050, heapSize ~264.38 KB/270720, currentSize=100.63 KB/103050 for 7be8b4dcb0e9f81dbd68149495fe8709 in 1649ms, sequenceid=175, compaction requested=true 2024-12-16T17:59:14,965 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39733 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-16T17:59:14,965 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-1 {event_type=RS_FLUSH_REGIONS, pid=162}] regionserver.HRegion(2538): Flush status journal for 7be8b4dcb0e9f81dbd68149495fe8709: 2024-12-16T17:59:14,965 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-1 {event_type=RS_FLUSH_REGIONS, pid=162}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1734371935330.7be8b4dcb0e9f81dbd68149495fe8709. 2024-12-16T17:59:14,965 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-1 {event_type=RS_FLUSH_REGIONS, pid=162}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=162 2024-12-16T17:59:14,965 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39733 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-16T17:59:14,965 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38367 {}] master.HMaster(4106): Remote procedure done, pid=162 2024-12-16T17:59:14,965 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39733 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-16T17:59:14,965 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39733 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-16T17:59:14,966 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39733 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-16T17:59:14,966 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39733 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-16T17:59:14,966 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39733 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-16T17:59:14,966 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39733 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-16T17:59:14,967 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39733 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-16T17:59:14,967 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39733 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-16T17:59:14,967 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39733 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-16T17:59:14,967 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39733 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-16T17:59:14,967 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=162, resume processing ppid=161 2024-12-16T17:59:14,967 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=162, ppid=161, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 1.8020 sec 2024-12-16T17:59:14,967 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39733 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-16T17:59:14,968 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39733 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-16T17:59:14,968 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39733 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-16T17:59:14,968 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39733 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-16T17:59:14,968 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39733 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-16T17:59:14,968 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39733 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-16T17:59:14,968 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=161, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=161, table=TestAcidGuarantees in 1.8050 sec 2024-12-16T17:59:14,969 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39733 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-16T17:59:14,969 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39733 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-16T17:59:14,969 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39733 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-16T17:59:14,970 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39733 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-16T17:59:14,970 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39733 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-16T17:59:14,970 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39733 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-16T17:59:14,971 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39733 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-16T17:59:14,971 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39733 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-16T17:59:14,971 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39733 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-16T17:59:14,971 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39733 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-16T17:59:14,972 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39733 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-16T17:59:14,972 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39733 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-16T17:59:14,972 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39733 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-16T17:59:14,972 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39733 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-16T17:59:14,972 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39733 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-16T17:59:14,972 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39733 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-16T17:59:14,973 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39733 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-16T17:59:14,973 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39733 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-16T17:59:14,973 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39733 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-16T17:59:14,973 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39733 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-16T17:59:14,973 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39733 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-16T17:59:14,974 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39733 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-16T17:59:14,974 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39733 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-16T17:59:14,974 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39733 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-16T17:59:14,974 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39733 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-16T17:59:14,975 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39733 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-16T17:59:14,975 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39733 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-16T17:59:14,975 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39733 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-16T17:59:14,975 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39733 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-16T17:59:14,976 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39733 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-16T17:59:14,976 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39733 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-16T17:59:14,976 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39733 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-16T17:59:14,976 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39733 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-16T17:59:14,976 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39733 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-16T17:59:14,977 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39733 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-16T17:59:14,977 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39733 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-16T17:59:14,977 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39733 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-16T17:59:14,977 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39733 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-16T17:59:14,977 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39733 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-16T17:59:14,978 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39733 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-16T17:59:14,978 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39733 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-16T17:59:14,978 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39733 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-16T17:59:14,978 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39733 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-16T17:59:14,978 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39733 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-16T17:59:14,978 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39733 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-16T17:59:14,979 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39733 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-16T17:59:14,979 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39733 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-16T17:59:14,979 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39733 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-16T17:59:14,980 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39733 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-16T17:59:14,980 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39733 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-16T17:59:14,980 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39733 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-16T17:59:14,980 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39733 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-16T17:59:14,981 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39733 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-16T17:59:14,981 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39733 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-16T17:59:14,981 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39733 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-16T17:59:14,982 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39733 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-16T17:59:14,983 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39733 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-16T17:59:14,983 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39733 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-16T17:59:14,984 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39733 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-16T17:59:14,984 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39733 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-16T17:59:14,985 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39733 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-16T17:59:14,985 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39733 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-16T17:59:14,986 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39733 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-16T17:59:14,987 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39733 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-16T17:59:14,987 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39733 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-16T17:59:14,987 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39733 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-16T17:59:14,988 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39733 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-16T17:59:14,988 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39733 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-16T17:59:14,989 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39733 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-16T17:59:14,989 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39733 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-16T17:59:14,990 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39733 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-16T17:59:14,990 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39733 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-16T17:59:14,990 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39733 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-16T17:59:14,990 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39733 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-16T17:59:14,990 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39733 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-16T17:59:14,990 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39733 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-16T17:59:14,991 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39733 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-16T17:59:14,991 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39733 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-16T17:59:14,991 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39733 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-16T17:59:14,992 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39733 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-16T17:59:14,992 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39733 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-16T17:59:14,992 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39733 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-16T17:59:14,992 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39733 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-16T17:59:14,992 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39733 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-16T17:59:14,992 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39733 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-16T17:59:14,993 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39733 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-16T17:59:14,993 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39733 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-16T17:59:14,993 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39733 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-16T17:59:14,993 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39733 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-16T17:59:14,994 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39733 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-16T17:59:14,994 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39733 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-16T17:59:14,994 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39733 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-16T17:59:14,994 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39733 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-16T17:59:14,994 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39733 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-16T17:59:14,994 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39733 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-16T17:59:14,995 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39733 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-16T17:59:14,995 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39733 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-16T17:59:14,995 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39733 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-16T17:59:14,995 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39733 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-16T17:59:14,995 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39733 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-16T17:59:14,995 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39733 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-16T17:59:14,996 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39733 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-16T17:59:14,996 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39733 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-16T17:59:14,996 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39733 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-16T17:59:14,996 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39733 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-16T17:59:14,996 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39733 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-16T17:59:14,997 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39733 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-16T17:59:14,997 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39733 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-16T17:59:14,997 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39733 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-16T17:59:14,997 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39733 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-16T17:59:14,997 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39733 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-16T17:59:14,998 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39733 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-16T17:59:14,998 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39733 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-16T17:59:14,998 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39733 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-16T17:59:14,998 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39733 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-16T17:59:14,999 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39733 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-16T17:59:14,999 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39733 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-16T17:59:14,999 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39733 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-16T17:59:14,999 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39733 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-16T17:59:14,999 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39733 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-16T17:59:14,999 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39733 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-16T17:59:15,000 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39733 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-16T17:59:15,000 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39733 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-16T17:59:15,000 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39733 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-16T17:59:15,000 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39733 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-16T17:59:15,000 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39733 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-16T17:59:15,000 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39733 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-16T17:59:15,001 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39733 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-16T17:59:15,001 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39733 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-16T17:59:15,001 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39733 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-16T17:59:15,001 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39733 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-16T17:59:15,001 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39733 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-16T17:59:15,002 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39733 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-16T17:59:15,002 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39733 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-16T17:59:15,002 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39733 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-16T17:59:15,002 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39733 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-16T17:59:15,002 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39733 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-16T17:59:15,003 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39733 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-16T17:59:15,003 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39733 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-16T17:59:15,003 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39733 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-16T17:59:15,003 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39733 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-16T17:59:15,003 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39733 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-16T17:59:15,003 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39733 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-16T17:59:15,004 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39733 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-16T17:59:15,004 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39733 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-16T17:59:15,004 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39733 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-16T17:59:15,005 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39733 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-16T17:59:15,005 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39733 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-16T17:59:15,005 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39733 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-16T17:59:15,005 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39733 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-16T17:59:15,005 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39733 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-16T17:59:15,005 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39733 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-16T17:59:15,006 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39733 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-16T17:59:15,006 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39733 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-16T17:59:15,006 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39733 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-16T17:59:15,006 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39733 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-16T17:59:15,007 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39733 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-16T17:59:15,069 DEBUG
[RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39733 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-16T17:59:15,069 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39733 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-16T17:59:15,069 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39733 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-16T17:59:15,070 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39733 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-16T17:59:15,070 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39733 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-16T17:59:15,070 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39733 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-16T17:59:15,070 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39733 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-16T17:59:15,071 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39733 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-16T17:59:15,071 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39733 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-16T17:59:15,074 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39733 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-16T17:59:15,074 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39733 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-16T17:59:15,074 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39733 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-16T17:59:15,075 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39733 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-16T17:59:15,075 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39733 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-16T17:59:15,075 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39733 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-16T17:59:15,075 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39733 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-16T17:59:15,075 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39733 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-16T17:59:15,076 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39733 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-16T17:59:15,076 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39733 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-16T17:59:15,076 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39733 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-16T17:59:15,076 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39733 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-16T17:59:15,076 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39733 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-16T17:59:15,076 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39733 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-16T17:59:15,077 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39733 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-16T17:59:15,077 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39733 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-16T17:59:15,077 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39733 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-16T17:59:15,077 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39733 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-16T17:59:15,077 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39733 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-16T17:59:15,078 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39733 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-16T17:59:15,078 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39733 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-16T17:59:15,078 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39733 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-16T17:59:15,078 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39733 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-16T17:59:15,078 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39733 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-16T17:59:15,078 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39733 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-16T17:59:15,079 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39733 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-16T17:59:15,079 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39733 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-16T17:59:15,079 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39733 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-16T17:59:15,079 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39733 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-16T17:59:15,079 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39733 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-16T17:59:15,079 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39733 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-16T17:59:15,080 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39733 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-16T17:59:15,080 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39733 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-16T17:59:15,080 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39733 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-16T17:59:15,080 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39733 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-16T17:59:15,080 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39733 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-16T17:59:15,081 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39733 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-16T17:59:15,081 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39733 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-16T17:59:15,081 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39733 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-16T17:59:15,081 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39733 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-16T17:59:15,081 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39733 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-16T17:59:15,081 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39733 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-16T17:59:15,082 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39733 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-16T17:59:15,082 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39733 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-16T17:59:15,082 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39733 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-16T17:59:15,082 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39733 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-16T17:59:15,082 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39733 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-16T17:59:15,083 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39733 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-16T17:59:15,083 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39733 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-16T17:59:15,083 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39733 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-16T17:59:15,083 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39733 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-16T17:59:15,083 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39733 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-16T17:59:15,083 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39733 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-16T17:59:15,084 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39733 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-16T17:59:15,084 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39733 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-16T17:59:15,084 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39733 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-16T17:59:15,084 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39733 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-16T17:59:15,084 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39733 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-16T17:59:15,084 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39733 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-16T17:59:15,085 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39733 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-16T17:59:15,085 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39733 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-16T17:59:15,085 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39733 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-16T17:59:15,085 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39733 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-16T17:59:15,086 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39733 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-16T17:59:15,086 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39733 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-16T17:59:15,086 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39733 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-16T17:59:15,086 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39733 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-16T17:59:15,086 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39733 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-16T17:59:15,086 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39733 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-16T17:59:15,087 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39733 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-16T17:59:15,087 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39733 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-16T17:59:15,087 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39733 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-16T17:59:15,087 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39733 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-16T17:59:15,087 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39733 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-16T17:59:15,087 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39733 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-16T17:59:15,088 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39733 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-16T17:59:15,088 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39733 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-16T17:59:15,088 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39733 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-16T17:59:15,088 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39733 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-16T17:59:15,088 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39733 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-16T17:59:15,089 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39733 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-16T17:59:15,089 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39733 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-16T17:59:15,089 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39733 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-16T17:59:15,089 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39733 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-16T17:59:15,089 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39733 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-16T17:59:15,089 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39733 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-16T17:59:15,090 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39733 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-16T17:59:15,090 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39733 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-16T17:59:15,090 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39733 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-16T17:59:15,090 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39733 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-16T17:59:15,091 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39733 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-16T17:59:15,091 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39733 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-16T17:59:15,091 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39733 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-16T17:59:15,091 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39733 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-16T17:59:15,091 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39733 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-16T17:59:15,091 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39733 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-16T17:59:15,092 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39733 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-16T17:59:15,092 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39733 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-16T17:59:15,092 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39733 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-16T17:59:15,092 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39733 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-16T17:59:15,094 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39733 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-16T17:59:15,095 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39733 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-16T17:59:15,095 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39733 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-16T17:59:15,095 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39733 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-16T17:59:15,095 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39733 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-16T17:59:15,096 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39733 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-16T17:59:15,096 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39733 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-16T17:59:15,096 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39733 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-16T17:59:15,096 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39733 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-16T17:59:15,097 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39733 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-16T17:59:15,097 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39733 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-16T17:59:15,097 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39733 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-16T17:59:15,097 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39733 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-16T17:59:15,097 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39733 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-16T17:59:15,097 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39733 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-16T17:59:15,098 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39733 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-16T17:59:15,098 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39733 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-16T17:59:15,098 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39733 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-16T17:59:15,098 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39733 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-16T17:59:15,098 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39733 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-16T17:59:15,098 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39733 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-16T17:59:15,099 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39733 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-16T17:59:15,099 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39733 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-16T17:59:15,099 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39733 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-16T17:59:15,099 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39733 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-16T17:59:15,099 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39733 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-16T17:59:15,099 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39733 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-16T17:59:15,099 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39733 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-16T17:59:15,100 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39733 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-16T17:59:15,100 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39733 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-16T17:59:15,100 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39733 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-16T17:59:15,100 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39733 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-16T17:59:15,100 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39733 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-16T17:59:15,101 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39733 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-16T17:59:15,101 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39733 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-16T17:59:15,101 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39733 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-16T17:59:15,101 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39733 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-16T17:59:15,101 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39733 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-16T17:59:15,101 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39733 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-16T17:59:15,102 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39733 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-16T17:59:15,102 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39733 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-16T17:59:15,102 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39733 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-16T17:59:15,102 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39733 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-16T17:59:15,102 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39733 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-16T17:59:15,102 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39733 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-16T17:59:15,103 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39733 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-16T17:59:15,103 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39733 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-16T17:59:15,103 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39733 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-16T17:59:15,103 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39733 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-16T17:59:15,103 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39733 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-16T17:59:15,103 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39733 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-16T17:59:15,104 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39733 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-16T17:59:15,104 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39733 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-16T17:59:15,104 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39733 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-16T17:59:15,104 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39733 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-16T17:59:15,104 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39733 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-16T17:59:15,104 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39733 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-16T17:59:15,105 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39733 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-16T17:59:15,105 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39733 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-16T17:59:15,105 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39733 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-16T17:59:15,105 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39733 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-16T17:59:15,105 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39733 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-16T17:59:15,105 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39733 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-16T17:59:15,106 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39733 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-16T17:59:15,106 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39733 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-16T17:59:15,106 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39733 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-16T17:59:15,106 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39733 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-16T17:59:15,106 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39733 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-16T17:59:15,106 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39733 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-16T17:59:15,107 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39733 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-16T17:59:15,107 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39733 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-16T17:59:15,107 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39733 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-16T17:59:15,107 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39733 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-16T17:59:15,107 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39733 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-16T17:59:15,108 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39733 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-16T17:59:15,108 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39733 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-16T17:59:15,108 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39733 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-16T17:59:15,108 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39733 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-16T17:59:15,108 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39733 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-16T17:59:15,109 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39733 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-16T17:59:15,109 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39733 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-16T17:59:15,109 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39733 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-16T17:59:15,109 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39733 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-16T17:59:15,109 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39733 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-16T17:59:15,109 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39733 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-16T17:59:15,110 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39733 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-16T17:59:15,110 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39733 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-16T17:59:15,110 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39733 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-16T17:59:15,110 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39733 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-16T17:59:15,110 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39733 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-16T17:59:15,110 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39733 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-16T17:59:15,111 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39733 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-16T17:59:15,111 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39733 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-16T17:59:15,111 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39733 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-16T17:59:15,111 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39733 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-16T17:59:15,111 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39733 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-16T17:59:15,112 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39733 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-16T17:59:15,112 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39733 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-16T17:59:15,112 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39733 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-16T17:59:15,113 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39733 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-16T17:59:15,115 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39733 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-16T17:59:15,115 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39733 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-16T17:59:15,116 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39733 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-16T17:59:15,116 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39733 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-16T17:59:15,116 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39733 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-16T17:59:15,117 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39733 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-16T17:59:15,117 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39733 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-16T17:59:15,117 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39733 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-16T17:59:15,117 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39733 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-16T17:59:15,117 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39733 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-16T17:59:15,117 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39733 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-16T17:59:15,118 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39733 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-16T17:59:15,118 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39733 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-16T17:59:15,118 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39733 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-16T17:59:15,118 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39733 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-16T17:59:15,118 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39733 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-16T17:59:15,118 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39733 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-16T17:59:15,119 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39733 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-16T17:59:15,119 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39733 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-16T17:59:15,119 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39733 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-16T17:59:15,119 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39733 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-16T17:59:15,119 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39733 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-16T17:59:15,119 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39733 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-16T17:59:15,120 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39733 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-16T17:59:15,120 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39733 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-16T17:59:15,120 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39733 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-16T17:59:15,120 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39733 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-16T17:59:15,120 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39733 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-16T17:59:15,121 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39733 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-16T17:59:15,121 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39733 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-16T17:59:15,121 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39733 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-16T17:59:15,121 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39733 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-16T17:59:15,121 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39733 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-16T17:59:15,121 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39733 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-16T17:59:15,122 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39733 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-16T17:59:15,122 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39733 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-16T17:59:15,122 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39733 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-16T17:59:15,122 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39733 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-16T17:59:15,122 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39733 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-16T17:59:15,122 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39733 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-16T17:59:15,123 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39733 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-16T17:59:15,123 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39733 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-16T17:59:15,123 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39733 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-16T17:59:15,123 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39733 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-16T17:59:15,123 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39733 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-16T17:59:15,123 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39733 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-16T17:59:15,124 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39733 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-16T17:59:15,124 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39733 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-16T17:59:15,124 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39733 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-16T17:59:15,125 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39733 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-16T17:59:15,125 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39733 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-16T17:59:15,125 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39733 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-16T17:59:15,125 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39733 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-16T17:59:15,125 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39733 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-16T17:59:15,125 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39733 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-16T17:59:15,126 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39733 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-16T17:59:15,126 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39733 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-16T17:59:15,126 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39733 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-16T17:59:15,126 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39733 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-16T17:59:15,126 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39733 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-16T17:59:15,126 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39733 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-16T17:59:15,127 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39733 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-16T17:59:15,127 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39733 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-16T17:59:15,127 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39733 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-16T17:59:15,127 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39733 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-16T17:59:15,127 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39733 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-16T17:59:15,127 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39733 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-16T17:59:15,128 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39733 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-16T17:59:15,128 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39733 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-16T17:59:15,128 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39733 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-16T17:59:15,128 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39733 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-16T17:59:15,129 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39733 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-16T17:59:15,129 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39733 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-16T17:59:15,129 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39733 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-16T17:59:15,129 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39733 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-16T17:59:15,129 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39733 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-16T17:59:15,129 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39733 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-16T17:59:15,130 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39733 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-16T17:59:15,130 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39733 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-16T17:59:15,130 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39733 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-16T17:59:15,130 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39733 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-16T17:59:15,130 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39733 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-16T17:59:15,130 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39733 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-16T17:59:15,131 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39733 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-16T17:59:15,131 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39733 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-16T17:59:15,131 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39733 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-16T17:59:15,131 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39733 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-16T17:59:15,131 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39733 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-16T17:59:15,131 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39733 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-16T17:59:15,132 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39733 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-16T17:59:15,132 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39733 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-16T17:59:15,132 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39733 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-16T17:59:15,132 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39733 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-16T17:59:15,133 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39733 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-16T17:59:15,133 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39733 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-16T17:59:15,133 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39733 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-16T17:59:15,133 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39733 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-16T17:59:15,133 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39733 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-16T17:59:15,133 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39733 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-16T17:59:15,135 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39733 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-16T17:59:15,137 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39733 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-16T17:59:15,138 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39733 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-16T17:59:15,138 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39733 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-16T17:59:15,139 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39733 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-16T17:59:15,139 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39733 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-16T17:59:15,139 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39733 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-16T17:59:15,139 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39733 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-16T17:59:15,140 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39733 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-16T17:59:15,140 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39733 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-16T17:59:15,140 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39733 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-16T17:59:15,140 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39733 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-16T17:59:15,140 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39733 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-16T17:59:15,141 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39733 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-16T17:59:15,141 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39733 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-16T17:59:15,141 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39733 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-16T17:59:15,141 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39733 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-16T17:59:15,141 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39733 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-16T17:59:15,142 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39733 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-16T17:59:15,142 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39733 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-16T17:59:15,142 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39733 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-16T17:59:15,142 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39733 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-16T17:59:15,142 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39733 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-16T17:59:15,143 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39733 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-16T17:59:15,143 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39733 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-16T17:59:15,143 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39733 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-16T17:59:15,143 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39733 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-16T17:59:15,143 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39733 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-16T17:59:15,144 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39733 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-16T17:59:15,144 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39733 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-16T17:59:15,144 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39733 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-16T17:59:15,144 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39733 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-16T17:59:15,144 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39733 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-16T17:59:15,144 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39733 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-16T17:59:15,145 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39733 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-16T17:59:15,145 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39733 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-16T17:59:15,145 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39733 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-16T17:59:15,145 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39733 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-16T17:59:15,145 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39733 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-16T17:59:15,146 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39733 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-16T17:59:15,146 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39733 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-16T17:59:15,146 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39733 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-16T17:59:15,146 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39733 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-16T17:59:15,146 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39733 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-16T17:59:15,147 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39733 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-16T17:59:15,147 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39733 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-16T17:59:15,147 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39733 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-16T17:59:15,147 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39733 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-16T17:59:15,147 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39733 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-16T17:59:15,148 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39733 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-16T17:59:15,148 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39733 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-16T17:59:15,148 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39733 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-16T17:59:15,148 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39733 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-16T17:59:15,148 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39733 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-16T17:59:15,148 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39733 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-16T17:59:15,149 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39733 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-16T17:59:15,149 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39733 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-16T17:59:15,149 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39733 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-16T17:59:15,149 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39733 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-16T17:59:15,149 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39733 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-16T17:59:15,149 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39733 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-16T17:59:15,150 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39733 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-16T17:59:15,150 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39733 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-16T17:59:15,150 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39733 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-16T17:59:15,150 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39733 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-16T17:59:15,150 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39733 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-16T17:59:15,151 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39733 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-16T17:59:15,151 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39733 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-16T17:59:15,151 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39733 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-16T17:59:15,151 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39733 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-16T17:59:15,151 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39733 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-16T17:59:15,151 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39733 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-16T17:59:15,152 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39733 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-16T17:59:15,152 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39733 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-16T17:59:15,152 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39733 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-16T17:59:15,152 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39733 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-16T17:59:15,152 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39733 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-16T17:59:15,152 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39733 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-16T17:59:15,153 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39733 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-16T17:59:15,153 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39733 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-16T17:59:15,153 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39733 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-16T17:59:15,153 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39733 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-16T17:59:15,153 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39733 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-16T17:59:15,154 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39733 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-16T17:59:15,154 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39733 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-16T17:59:15,154 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39733 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-16T17:59:15,154 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39733 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-16T17:59:15,154 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39733 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-16T17:59:15,155 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39733 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-16T17:59:15,155 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39733 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-16T17:59:15,155 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39733 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-16T17:59:15,155 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39733 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-16T17:59:15,155 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39733 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-16T17:59:15,156 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39733 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-16T17:59:15,156 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39733 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-16T17:59:15,156 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39733 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-16T17:59:15,156 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39733 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-16T17:59:15,156 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39733 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-16T17:59:15,157 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39733 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-16T17:59:15,157 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39733 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-16T17:59:15,157 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39733 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-16T17:59:15,161 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39733 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-16T17:59:15,161 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39733 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-16T17:59:15,162 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39733 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-16T17:59:15,162 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39733 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-16T17:59:15,162 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39733 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-16T17:59:15,162 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39733 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-16T17:59:15,163 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39733 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-16T17:59:15,163 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39733 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-16T17:59:15,163 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39733 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-16T17:59:15,163 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39733 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-16T17:59:15,163 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39733 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-16T17:59:15,164 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler={0,1,2},queue=0,port=39733 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker (identical DEBUG entries emitted continuously by RPC handlers 0, 1 and 2 from 2024-12-16T17:59:15,164 through 17:59:15,228; collapsed to this single line) 2024-12-16T17:59:15,228 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39733 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-16T17:59:15,228 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39733 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-16T17:59:15,228 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39733 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-16T17:59:15,229 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39733 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-16T17:59:15,229 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39733 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-16T17:59:15,229 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39733 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-16T17:59:15,229 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39733 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-16T17:59:15,229 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39733 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-16T17:59:15,229 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39733 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-16T17:59:15,230 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39733 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-16T17:59:15,230 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39733 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-16T17:59:15,230 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39733 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-16T17:59:15,230 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39733 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-16T17:59:15,230 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39733 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-16T17:59:15,230 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39733 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-16T17:59:15,231 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39733 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-16T17:59:15,231 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39733 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-16T17:59:15,231 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39733 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-16T17:59:15,231 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39733 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-16T17:59:15,231 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39733 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-16T17:59:15,232 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39733 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-16T17:59:15,232 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39733 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-16T17:59:15,232 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39733 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-16T17:59:15,232 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39733 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-16T17:59:15,232 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39733 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-16T17:59:15,233 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39733 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-16T17:59:15,233 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39733 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-16T17:59:15,233 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39733 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-16T17:59:15,233 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39733 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-16T17:59:15,233 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39733 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-16T17:59:15,233 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39733 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-16T17:59:15,234 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39733 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-16T17:59:15,234 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39733 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-16T17:59:15,234 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39733 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-16T17:59:15,234 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39733 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-16T17:59:15,234 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39733 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-16T17:59:15,235 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39733 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-16T17:59:15,235 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39733 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-16T17:59:15,235 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39733 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-16T17:59:15,235 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39733 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-16T17:59:15,236 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39733 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-16T17:59:15,236 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39733 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-16T17:59:15,236 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39733 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-16T17:59:15,236 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39733 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-16T17:59:15,237 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39733 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-16T17:59:15,237 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39733 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-16T17:59:15,237 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39733 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-16T17:59:15,237 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39733 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-16T17:59:15,237 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39733 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-16T17:59:15,237 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39733 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-16T17:59:15,238 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39733 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-16T17:59:15,238 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39733 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-16T17:59:15,238 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39733 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-16T17:59:15,238 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39733 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-16T17:59:15,238 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39733 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-16T17:59:15,239 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39733 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-16T17:59:15,239 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39733 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-16T17:59:15,239 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39733 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-16T17:59:15,239 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39733 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-16T17:59:15,239 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39733 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-16T17:59:15,239 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39733 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-16T17:59:15,240 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39733 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-16T17:59:15,240 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39733 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-16T17:59:15,240 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39733 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-16T17:59:15,240 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39733 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-16T17:59:15,240 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39733 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-16T17:59:15,240 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39733 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-16T17:59:15,241 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39733 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-16T17:59:15,241 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39733 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-16T17:59:15,241 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39733 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-16T17:59:15,241 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39733 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-16T17:59:15,241 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39733 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-16T17:59:15,242 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39733 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-16T17:59:15,242 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39733 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-16T17:59:15,242 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39733 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-16T17:59:15,242 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39733 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-16T17:59:15,242 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39733 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-16T17:59:15,242 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39733 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-16T17:59:15,243 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39733 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-16T17:59:15,243 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39733 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-16T17:59:15,243 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39733 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-16T17:59:15,243 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39733 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-16T17:59:15,243 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39733 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-16T17:59:15,244 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39733 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-16T17:59:15,244 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39733 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-16T17:59:15,244 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39733 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-16T17:59:15,244 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39733 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-16T17:59:15,245 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39733 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-16T17:59:15,245 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39733 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-16T17:59:15,245 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39733 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-16T17:59:15,245 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39733 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-16T17:59:15,245 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39733 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-16T17:59:15,245 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39733 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-16T17:59:15,246 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39733 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-16T17:59:15,246 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39733 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-16T17:59:15,248 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39733 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-16T17:59:15,249 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39733 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-16T17:59:15,250 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39733 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-16T17:59:15,250 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39733 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-16T17:59:15,250 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39733 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-16T17:59:15,250 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39733 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-16T17:59:15,251 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39733 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-16T17:59:15,251 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39733 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-16T17:59:15,251 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39733 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-16T17:59:15,251 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39733 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-16T17:59:15,251 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39733 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-16T17:59:15,252 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39733 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-16T17:59:15,252 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39733 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-16T17:59:15,252 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39733 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-16T17:59:15,252 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39733 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-16T17:59:15,252 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39733 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-16T17:59:15,252 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39733 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-16T17:59:15,253 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39733 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-16T17:59:15,253 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39733 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-16T17:59:15,253 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39733 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-16T17:59:15,253 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39733 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-16T17:59:15,253 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39733 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-16T17:59:15,254 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39733 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-16T17:59:15,254 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39733 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-16T17:59:15,254 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39733 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-16T17:59:15,254 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39733 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-16T17:59:15,255 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39733 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-16T17:59:15,255 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39733 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-16T17:59:15,255 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39733 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-16T17:59:15,255 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39733 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-16T17:59:15,255 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39733 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-16T17:59:15,255 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39733 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-16T17:59:15,256 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39733 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-16T17:59:15,256 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39733 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-16T17:59:15,256 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39733 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-16T17:59:15,256 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39733 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-16T17:59:15,256 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39733 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-16T17:59:15,256 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39733 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-16T17:59:15,257 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39733 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-16T17:59:15,257 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39733 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-16T17:59:15,258 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39733 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-16T17:59:15,258 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39733 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-16T17:59:15,258 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39733 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-16T17:59:15,258 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39733 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-16T17:59:15,258 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39733 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-16T17:59:15,259 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39733 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-16T17:59:15,259 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39733 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-16T17:59:15,259 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39733 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-16T17:59:15,259 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39733 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-16T17:59:15,259 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39733 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-16T17:59:15,259 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39733 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-16T17:59:15,260 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39733 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-16T17:59:15,260 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39733 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-16T17:59:15,260 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39733 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-16T17:59:15,260 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39733 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-16T17:59:15,260 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39733 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-16T17:59:15,261 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39733 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-16T17:59:15,261 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39733 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-16T17:59:15,261 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39733 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-16T17:59:15,261 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39733 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-16T17:59:15,261 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39733 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-16T17:59:15,261 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39733 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-16T17:59:15,262 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39733 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-16T17:59:15,262 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39733 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-16T17:59:15,262 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39733 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-16T17:59:15,262 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39733 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-16T17:59:15,263 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39733 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-16T17:59:15,263 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39733 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-16T17:59:15,263 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39733 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-16T17:59:15,263 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39733 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-16T17:59:15,263 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39733 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-16T17:59:15,263 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39733 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-16T17:59:15,264 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39733 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-16T17:59:15,264 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39733 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-16T17:59:15,264 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39733 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-16T17:59:15,264 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39733 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-16T17:59:15,264 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39733 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-16T17:59:15,265 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39733 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-16T17:59:15,265 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39733 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-16T17:59:15,265 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39733 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-16T17:59:15,265 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39733 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-16T17:59:15,265 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39733 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-16T17:59:15,265 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39733 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-16T17:59:15,266 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39733 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-16T17:59:15,266 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39733 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-16T17:59:15,266 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39733 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-16T17:59:15,266 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39733 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-16T17:59:15,266 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39733 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-16T17:59:15,267 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39733 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-16T17:59:15,267 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39733 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-16T17:59:15,267 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38367 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=161 2024-12-16T17:59:15,267 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39733 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-16T17:59:15,267 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39733 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-16T17:59:15,267 INFO [Thread-2436 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 161 completed 2024-12-16T17:59:15,267 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39733 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-16T17:59:15,267 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39733 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-16T17:59:15,268 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39733 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-16T17:59:15,268 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39733 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-16T17:59:15,268 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38367 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-12-16T17:59:15,269 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39733 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-16T17:59:15,269 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39733 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-16T17:59:15,269 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39733 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-16T17:59:15,269 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38367 {}] procedure2.ProcedureExecutor(1098): Stored pid=163, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=163, table=TestAcidGuarantees 2024-12-16T17:59:15,270 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39733 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-16T17:59:15,270 INFO [PEWorker-5 {}] procedure.FlushTableProcedure(91): pid=163, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=163, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-12-16T17:59:15,270 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38367 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=163 2024-12-16T17:59:15,271 INFO [PEWorker-5 {}] procedure.FlushTableProcedure(91): pid=163, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=163, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-12-16T17:59:15,271 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=164, ppid=163, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-12-16T17:59:15,272 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39733 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-16T17:59:15,273 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39733 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-16T17:59:15,273 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39733 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-16T17:59:15,273 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39733 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-16T17:59:15,274 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39733 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-16T17:59:15,274 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39733 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-16T17:59:15,274 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39733 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-16T17:59:15,275 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39733 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-16T17:59:15,275 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39733 {}] 
[... repeated DEBUG entries collapsed, 2024-12-16T17:59:15,275 through 17:59:15,329: RpcServer.default.FPBQ.Fifo handlers 0/1/2 (queue=0, port=39733) storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker ...]
2024-12-16T17:59:15,329 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39733 {}]
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-16T17:59:15,329 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39733 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-16T17:59:15,329 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39733 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-16T17:59:15,329 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39733 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-16T17:59:15,330 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39733 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-16T17:59:15,330 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39733 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-16T17:59:15,330 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39733 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-16T17:59:15,330 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39733 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-16T17:59:15,330 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39733 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-16T17:59:15,331 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39733 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-16T17:59:15,331 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39733 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-16T17:59:15,331 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39733 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-16T17:59:15,331 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39733 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-16T17:59:15,331 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39733 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-16T17:59:15,332 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39733 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-16T17:59:15,332 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39733 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-16T17:59:15,332 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39733 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-16T17:59:15,332 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39733 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-16T17:59:15,333 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39733 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-16T17:59:15,333 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39733 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-16T17:59:15,334 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39733 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-16T17:59:15,334 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39733 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-16T17:59:15,334 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39733 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-16T17:59:15,335 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39733 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-16T17:59:15,335 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39733 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-16T17:59:15,335 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39733 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-16T17:59:15,337 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39733 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-16T17:59:15,338 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39733 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-16T17:59:15,338 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39733 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-16T17:59:15,338 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39733 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-16T17:59:15,339 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39733 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-16T17:59:15,339 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39733 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-16T17:59:15,339 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39733 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-16T17:59:15,340 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39733 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-16T17:59:15,340 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39733 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-16T17:59:15,340 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39733 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-16T17:59:15,340 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39733 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-16T17:59:15,341 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39733 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-16T17:59:15,341 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39733 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-16T17:59:15,341 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39733 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-16T17:59:15,341 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39733 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-16T17:59:15,341 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39733 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-16T17:59:15,341 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39733 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-16T17:59:15,342 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39733 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-16T17:59:15,342 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39733 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-16T17:59:15,342 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39733 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-16T17:59:15,342 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39733 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-16T17:59:15,342 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39733 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-16T17:59:15,342 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39733 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-16T17:59:15,342 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39733 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-16T17:59:15,343 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39733 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-16T17:59:15,343 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39733 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-16T17:59:15,343 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39733 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-16T17:59:15,343 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39733 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-16T17:59:15,343 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39733 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-16T17:59:15,343 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39733 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-16T17:59:15,344 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39733 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-16T17:59:15,344 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39733 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-16T17:59:15,344 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39733 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-16T17:59:15,344 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39733 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-16T17:59:15,344 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39733 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-16T17:59:15,345 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39733 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-16T17:59:15,345 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39733 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-16T17:59:15,345 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39733 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-16T17:59:15,345 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39733 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-16T17:59:15,345 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39733 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-16T17:59:15,345 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39733 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-16T17:59:15,346 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39733 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-16T17:59:15,346 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39733 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-16T17:59:15,346 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39733 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-16T17:59:15,346 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39733 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-16T17:59:15,346 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39733 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-16T17:59:15,346 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39733 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-16T17:59:15,347 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39733 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-16T17:59:15,347 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39733 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-16T17:59:15,347 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39733 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-16T17:59:15,347 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39733 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-16T17:59:15,347 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39733 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-16T17:59:15,347 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39733 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-16T17:59:15,348 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39733 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-16T17:59:15,348 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39733 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-16T17:59:15,348 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39733 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-16T17:59:15,348 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39733 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-16T17:59:15,348 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39733 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-16T17:59:15,348 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39733 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-16T17:59:15,349 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39733 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-16T17:59:15,349 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39733 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-16T17:59:15,349 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39733 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-16T17:59:15,349 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39733 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-16T17:59:15,349 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39733 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-16T17:59:15,349 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39733 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-16T17:59:15,350 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39733 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-16T17:59:15,350 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39733 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-16T17:59:15,350 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39733 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-16T17:59:15,350 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39733 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-16T17:59:15,350 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39733 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-16T17:59:15,351 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39733 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-16T17:59:15,351 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39733 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-16T17:59:15,351 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39733 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-16T17:59:15,351 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39733 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-16T17:59:15,351 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39733 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-16T17:59:15,351 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39733 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-16T17:59:15,352 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39733 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-16T17:59:15,352 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39733 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-16T17:59:15,352 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39733 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-16T17:59:15,352 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39733 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-16T17:59:15,353 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39733 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-16T17:59:15,353 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39733 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-16T17:59:15,353 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39733 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-16T17:59:15,353 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39733 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-16T17:59:15,353 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39733 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-16T17:59:15,353 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39733 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-16T17:59:15,354 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39733 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-16T17:59:15,354 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39733 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-16T17:59:15,354 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39733 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-16T17:59:15,354 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39733 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-16T17:59:15,354 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39733 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-16T17:59:15,354 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39733 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-16T17:59:15,354 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39733 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-16T17:59:15,355 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39733 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-16T17:59:15,355 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39733 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-16T17:59:15,355 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39733 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-16T17:59:15,355 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39733 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-16T17:59:15,356 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39733 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-16T17:59:15,356 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39733 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-16T17:59:15,356 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39733 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-16T17:59:15,358 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39733 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-16T17:59:15,358 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39733 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-16T17:59:15,359 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39733 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-16T17:59:15,359 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39733 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-16T17:59:15,360 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39733 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-16T17:59:15,360 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39733 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-16T17:59:15,360 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39733 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-16T17:59:15,360 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39733 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-16T17:59:15,360 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39733 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-16T17:59:15,360 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39733 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-16T17:59:15,361 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39733 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-16T17:59:15,361 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39733 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-16T17:59:15,361 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39733 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-16T17:59:15,361 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39733 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-16T17:59:15,361 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39733 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-16T17:59:15,361 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39733 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-16T17:59:15,362 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39733 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-16T17:59:15,362 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39733 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-16T17:59:15,362 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39733 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-16T17:59:15,362 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39733 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-16T17:59:15,362 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39733 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-16T17:59:15,362 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39733 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-16T17:59:15,363 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39733 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-16T17:59:15,363 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39733 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-16T17:59:15,363 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39733 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-16T17:59:15,363 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39733 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-16T17:59:15,363 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39733 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-16T17:59:15,363 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39733 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-16T17:59:15,364 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39733 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-16T17:59:15,364 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39733 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-16T17:59:15,364 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39733 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-16T17:59:15,364 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39733 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-16T17:59:15,365 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39733 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-16T17:59:15,365 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39733 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-16T17:59:15,365 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39733 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-16T17:59:15,365 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39733 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-16T17:59:15,365 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39733 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-16T17:59:15,365 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39733 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-16T17:59:15,365 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39733 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-16T17:59:15,366 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39733 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-16T17:59:15,366 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39733 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-16T17:59:15,366 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39733 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-16T17:59:15,366 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39733 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-16T17:59:15,366 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39733 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-16T17:59:15,366 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39733 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-16T17:59:15,367 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39733 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-16T17:59:15,367 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39733 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-16T17:59:15,367 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39733 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-16T17:59:15,367 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39733 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-16T17:59:15,367 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39733 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-16T17:59:15,368 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39733 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-16T17:59:15,368 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39733 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-16T17:59:15,368 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39733 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-16T17:59:15,368 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39733 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-16T17:59:15,368 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39733 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-16T17:59:15,368 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39733 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-16T17:59:15,369 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39733 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-16T17:59:15,369 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39733 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-16T17:59:15,369 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39733 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-16T17:59:15,369 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39733 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-16T17:59:15,369 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39733 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-16T17:59:15,369 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39733 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-16T17:59:15,370 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39733 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-16T17:59:15,370 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39733 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-16T17:59:15,370 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39733 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-16T17:59:15,370 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39733 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-16T17:59:15,370 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39733 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-16T17:59:15,371 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39733 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-16T17:59:15,371 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39733 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-16T17:59:15,371 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39733 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-16T17:59:15,371 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39733 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-16T17:59:15,371 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38367 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=163 2024-12-16T17:59:15,371 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39733 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-16T17:59:15,371 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39733 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-16T17:59:15,372 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39733 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-16T17:59:15,372 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39733 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-16T17:59:15,372 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39733 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-16T17:59:15,372 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39733 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-16T17:59:15,372 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39733 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-16T17:59:15,373 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39733 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-16T17:59:15,373 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39733 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-16T17:59:15,373 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39733 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-16T17:59:15,373 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39733 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-16T17:59:15,373 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39733 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-16T17:59:15,373 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39733 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-16T17:59:15,374 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39733 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-16T17:59:15,374 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39733 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-16T17:59:15,374 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39733 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-16T17:59:15,374 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39733 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-16T17:59:15,374 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39733 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-16T17:59:15,374 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39733 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-16T17:59:15,375 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39733 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-16T17:59:15,375 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39733 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-16T17:59:15,375 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39733 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-16T17:59:15,375 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39733 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-16T17:59:15,375 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39733 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-16T17:59:15,375 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39733 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-16T17:59:15,376 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39733 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-16T17:59:15,376 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39733 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-16T17:59:15,376 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39733 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-16T17:59:15,376 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39733 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-16T17:59:15,379 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39733 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-16T17:59:15,380 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39733 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-16T17:59:15,380 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39733 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-16T17:59:15,380 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39733 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-16T17:59:15,380 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39733 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-16T17:59:15,381 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39733 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-16T17:59:15,381 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39733 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-16T17:59:15,381 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39733 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-16T17:59:15,381 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39733 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-16T17:59:15,381 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39733 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-16T17:59:15,381 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39733 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-16T17:59:15,382 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39733 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-16T17:59:15,382 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39733 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-16T17:59:15,382 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39733 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-16T17:59:15,382 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39733 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-16T17:59:15,382 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39733 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-16T17:59:15,382 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39733 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-16T17:59:15,383 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39733 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-16T17:59:15,383 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39733 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-16T17:59:15,383 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39733 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-16T17:59:15,383 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39733 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-16T17:59:15,383 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39733 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-16T17:59:15,383 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39733 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-16T17:59:15,384 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39733 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-16T17:59:15,384 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39733 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-16T17:59:15,384 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39733 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-16T17:59:15,384 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39733 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-16T17:59:15,384 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39733 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-16T17:59:15,385 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39733 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-16T17:59:15,385 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39733 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-16T17:59:15,385 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39733 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-16T17:59:15,385 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39733 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-16T17:59:15,385 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39733 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-16T17:59:15,385 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39733 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-16T17:59:15,386 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39733 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-16T17:59:15,386 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39733 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-16T17:59:15,386 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39733 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-16T17:59:15,386 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39733 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-16T17:59:15,386 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39733 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-16T17:59:15,386 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39733 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-16T17:59:15,387 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39733 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-16T17:59:15,387 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39733 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-16T17:59:15,387 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39733 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-16T17:59:15,387 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39733 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-16T17:59:15,387 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39733 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-16T17:59:15,388 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39733 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-16T17:59:15,388 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39733 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-16T17:59:15,388 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39733 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-16T17:59:15,388 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39733 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-16T17:59:15,388 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39733 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-16T17:59:15,388 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39733 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-16T17:59:15,389 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39733 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-16T17:59:15,389 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39733 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-16T17:59:15,389 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39733 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-16T17:59:15,389 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39733 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-16T17:59:15,389 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39733 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-16T17:59:15,389 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39733 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-16T17:59:15,390 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39733 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-16T17:59:15,390 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39733 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-16T17:59:15,390 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39733 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-16T17:59:15,390 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39733 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-16T17:59:15,390 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39733 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-16T17:59:15,390 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39733 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-16T17:59:15,391 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39733 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-16T17:59:15,391 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39733 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-16T17:59:15,391 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39733 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-16T17:59:15,391 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39733 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-16T17:59:15,391 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39733 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-16T17:59:15,391 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39733 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-16T17:59:15,392 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39733 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-16T17:59:15,392 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39733 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-16T17:59:15,392 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39733 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-16T17:59:15,392 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39733 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-16T17:59:15,392 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39733 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-16T17:59:15,392 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39733 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-16T17:59:15,393 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39733 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-16T17:59:15,393 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39733 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-16T17:59:15,393 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39733 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-16T17:59:15,393 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39733 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-16T17:59:15,393 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39733 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-16T17:59:15,393 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39733 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-16T17:59:15,394 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39733 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-16T17:59:15,394 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39733 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-16T17:59:15,394 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39733 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-16T17:59:15,394 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39733 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-16T17:59:15,394 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39733 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-16T17:59:15,394 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39733 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-16T17:59:15,395 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39733 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-16T17:59:15,395 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39733 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-16T17:59:15,395 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39733 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-16T17:59:15,395 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39733 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-16T17:59:15,395 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39733 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-16T17:59:15,395 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39733 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-16T17:59:15,395 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39733 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-16T17:59:15,396 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39733 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-16T17:59:15,396 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39733 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-16T17:59:15,396 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39733 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-16T17:59:15,396 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39733 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-16T17:59:15,396 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39733 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-16T17:59:15,400 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39733 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-16T17:59:15,401 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39733 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-16T17:59:15,401 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39733 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-16T17:59:15,401 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39733 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-16T17:59:15,402 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39733 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-16T17:59:15,402 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39733 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-16T17:59:15,402 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39733 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-16T17:59:15,403 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39733 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-16T17:59:15,403 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39733 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-16T17:59:15,403 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39733 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-16T17:59:15,404 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39733 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-16T17:59:15,404 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39733 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-16T17:59:15,404 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39733 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-16T17:59:15,404 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39733 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-16T17:59:15,405 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39733 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-16T17:59:15,405 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39733 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-16T17:59:15,405 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39733 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-16T17:59:15,406 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39733 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-16T17:59:15,406 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39733 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-16T17:59:15,406 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39733 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-16T17:59:15,406 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39733 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-16T17:59:15,406 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39733 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-16T17:59:15,406 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39733 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-16T17:59:15,406 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39733 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-16T17:59:15,407 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39733 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-16T17:59:15,407 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39733 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-16T17:59:15,407 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39733 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-16T17:59:15,407 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39733 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-16T17:59:15,407 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39733 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-16T17:59:15,407 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39733 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-16T17:59:15,408 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39733 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-16T17:59:15,408 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39733 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-16T17:59:15,408 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39733 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-16T17:59:15,408 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39733 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-16T17:59:15,408 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39733 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-16T17:59:15,409 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39733 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-16T17:59:15,409 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39733 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-16T17:59:15,409 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39733 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-16T17:59:15,409 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39733 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-16T17:59:15,409 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39733 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-16T17:59:15,409 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39733 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-16T17:59:15,409 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39733 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-16T17:59:15,410 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39733 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-16T17:59:15,410 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39733 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-16T17:59:15,410 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39733 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-16T17:59:15,410 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39733 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-16T17:59:15,410 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39733 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-16T17:59:15,411 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39733 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-16T17:59:15,411 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39733 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-16T17:59:15,411 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39733 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-16T17:59:15,411 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39733 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-16T17:59:15,411 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39733 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-16T17:59:15,411 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39733 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-16T17:59:15,411 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39733 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-16T17:59:15,412 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39733 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-16T17:59:15,412 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39733 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-16T17:59:15,412 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39733 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-16T17:59:15,412 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39733 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-16T17:59:15,412 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39733 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-16T17:59:15,412 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39733 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-16T17:59:15,413 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39733 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-16T17:59:15,413 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39733 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-16T17:59:15,413 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39733 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-16T17:59:15,413 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39733 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-16T17:59:15,413 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39733 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-16T17:59:15,414 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39733 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-16T17:59:15,414 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39733 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-16T17:59:15,414 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39733 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-16T17:59:15,414 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39733 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-16T17:59:15,414 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39733 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-16T17:59:15,414 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39733 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-16T17:59:15,414 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39733 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-16T17:59:15,415 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39733 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-16T17:59:15,415 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39733 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-16T17:59:15,415 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39733 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-16T17:59:15,415 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39733 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-16T17:59:15,415 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39733 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-16T17:59:15,415 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39733 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-16T17:59:15,416 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39733 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-16T17:59:15,416 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39733 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-16T17:59:15,416 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39733 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-16T17:59:15,416 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39733 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-16T17:59:15,417 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39733 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-16T17:59:15,417 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39733 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-16T17:59:15,417 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39733 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-16T17:59:15,417 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39733 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-16T17:59:15,418 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39733 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-16T17:59:15,419 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39733 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-16T17:59:15,420 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39733 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-16T17:59:15,420 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39733 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-16T17:59:15,421 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39733 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-16T17:59:15,421 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39733 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-16T17:59:15,421 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39733 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-16T17:59:15,421 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39733 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-16T17:59:15,421 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39733 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-16T17:59:15,422 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39733 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-16T17:59:15,422 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 3609ad07831c,39733,1734371789085 2024-12-16T17:59:15,422 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39733 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-16T17:59:15,422 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39733 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-16T17:59:15,422 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=39733 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=164 2024-12-16T17:59:15,422 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39733 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-16T17:59:15,422 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-2 {event_type=RS_FLUSH_REGIONS, pid=164}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1734371935330.7be8b4dcb0e9f81dbd68149495fe8709. 2024-12-16T17:59:15,422 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39733 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-16T17:59:15,423 INFO [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-2 {event_type=RS_FLUSH_REGIONS, pid=164}] regionserver.HRegion(2837): Flushing 7be8b4dcb0e9f81dbd68149495fe8709 3/3 column families, dataSize=100.63 KB heapSize=264.42 KB 2024-12-16T17:59:15,423 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-2 {event_type=RS_FLUSH_REGIONS, pid=164}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 7be8b4dcb0e9f81dbd68149495fe8709, store=A 2024-12-16T17:59:15,423 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-2 {event_type=RS_FLUSH_REGIONS, pid=164}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-16T17:59:15,423 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-2 {event_type=RS_FLUSH_REGIONS, pid=164}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 7be8b4dcb0e9f81dbd68149495fe8709, store=B 2024-12-16T17:59:15,423 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-2 {event_type=RS_FLUSH_REGIONS, pid=164}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-16T17:59:15,423 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-2 {event_type=RS_FLUSH_REGIONS, pid=164}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 7be8b4dcb0e9f81dbd68149495fe8709, store=C 2024-12-16T17:59:15,423 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-2 {event_type=RS_FLUSH_REGIONS, pid=164}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-16T17:59:15,423 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39733 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-16T17:59:15,423 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39733 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-16T17:59:15,423 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39733 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-16T17:59:15,424 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39733 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-16T17:59:15,425 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39733 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-16T17:59:15,425 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39733 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-16T17:59:15,425 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39733 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-16T17:59:15,426 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39733 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-16T17:59:15,426 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39733 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-16T17:59:15,426 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39733 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-16T17:59:15,426 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39733 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-16T17:59:15,426 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39733 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-16T17:59:15,427 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39733 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-16T17:59:15,427 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39733 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-16T17:59:15,427 DEBUG 
2024-12-16T17:59:15,428 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-2 {event_type=RS_FLUSH_REGIONS, pid=164}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241216edd959f254774483a4f17b0557853492_7be8b4dcb0e9f81dbd68149495fe8709 is 50, key is test_row_1/A:col10/1734371953470/Put/seqid=0
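The writer above is producing a file under mobdir/.tmp, i.e. the MOB (medium object) area, so family A of TestAcidGuarantees is evidently MOB-enabled. A minimal sketch of how such a family could be declared with the standard 2.x descriptor builders; the 100 KB threshold and the class name are illustrative assumptions, not values read from this log:

import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
import org.apache.hadoop.hbase.util.Bytes;

public class MobFamilyExample {
  public static void main(String[] args) {
    TableDescriptorBuilder table = TableDescriptorBuilder.newBuilder(TableName.valueOf("TestAcidGuarantees"));
    table.setColumnFamily(ColumnFamilyDescriptorBuilder.newBuilder(Bytes.toBytes("A"))
        .setMobEnabled(true)           // cells above the threshold are written to the MOB area (mobdir)
        .setMobThreshold(100 * 1024L)  // assumed threshold for illustration
        .build());
    System.out.println(table.build()); // the descriptor would normally be passed to Admin.createTable
  }
}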
2024-12-16T17:59:15,450 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41817 is added to blk_1073742417_1593 (size=9814)
2024-12-16T17:59:15,451 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-2 {event_type=RS_FLUSH_REGIONS, pid=164}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-12-16T17:59:15,454 INFO [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-2 {event_type=RS_FLUSH_REGIONS, pid=164}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241216edd959f254774483a4f17b0557853492_7be8b4dcb0e9f81dbd68149495fe8709 to hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241216edd959f254774483a4f17b0557853492_7be8b4dcb0e9f81dbd68149495fe8709
2024-12-16T17:59:15,455 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-2 {event_type=RS_FLUSH_REGIONS, pid=164}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/7be8b4dcb0e9f81dbd68149495fe8709/.tmp/A/f76cf7a2aa7d492b891eb9a1e305f31b, store: [table=TestAcidGuarantees family=A region=7be8b4dcb0e9f81dbd68149495fe8709]
2024-12-16T17:59:15,456 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-2 {event_type=RS_FLUSH_REGIONS, pid=164}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/7be8b4dcb0e9f81dbd68149495fe8709/.tmp/A/f76cf7a2aa7d492b891eb9a1e305f31b is 175, key is test_row_1/A:col10/1734371953470/Put/seqid=0
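The store file for family A above holds cells keyed like test_row_1/A:col10/1734371953470. A hypothetical sketch of the kind of client write that produces such cells (the value bytes and the class name are assumptions added for illustration):

import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.util.Bytes;

public class PutExample {
  public static void main(String[] args) throws Exception {
    try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create());
         Table table = conn.getTable(TableName.valueOf("TestAcidGuarantees"))) {
      // Row test_row_1, family A, qualifier col10, as in the cell keys logged above.
      Put put = new Put(Bytes.toBytes("test_row_1"));
      put.addColumn(Bytes.toBytes("A"), Bytes.toBytes("col10"), Bytes.toBytes("some-value"));
      // The write sits in the memstore of family A until a flush like pid=164 writes it to a store file.
      table.put(put);
    }
  }
}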
2024-12-16T17:59:15,470 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41817 is added to blk_1073742418_1594 (size=22461)
2024-12-16T17:59:15,470 INFO [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-2 {event_type=RS_FLUSH_REGIONS, pid=164}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=193, memsize=33.5 K, hasBloomFilter=true, into tmp file hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/7be8b4dcb0e9f81dbd68149495fe8709/.tmp/A/f76cf7a2aa7d492b891eb9a1e305f31b
2024-12-16T17:59:15,476 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-2 {event_type=RS_FLUSH_REGIONS, pid=164}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/7be8b4dcb0e9f81dbd68149495fe8709/.tmp/B/b904dccbea474fa080b7b585ad011a0e is 50, key is test_row_1/B:col10/1734371953470/Put/seqid=0
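The recurring StoreFileTrackerFactory DEBUG entries in this trace record the tracker implementation (here DefaultStoreFileTracker) being instantiated each time a store is consulted. A sketch of how the tracker implementation is typically selected; the configuration key and the accepted values below are recalled assumptions and should be verified against this HBase version:

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;

public class TrackerConfigExample {
  public static void main(String[] args) {
    Configuration conf = HBaseConfiguration.create();
    // "DEFAULT" keeps the classic tracker (DefaultStoreFileTracker as seen in this log);
    // "FILE" is assumed to select the file-based tracker introduced for store file tracking.
    conf.set("hbase.store.file-tracker.impl", "DEFAULT");
    System.out.println(conf.get("hbase.store.file-tracker.impl"));
  }
}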
2024-12-16T17:59:15,492 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39733 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-16T17:59:15,492 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39733 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-16T17:59:15,493 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39733 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-16T17:59:15,493 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39733 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-16T17:59:15,493 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39733 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-16T17:59:15,494 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39733 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-16T17:59:15,494 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39733 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-16T17:59:15,494 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39733 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-16T17:59:15,494 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39733 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-16T17:59:15,494 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41817 is added to blk_1073742419_1595 (size=9757) 2024-12-16T17:59:15,495 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39733 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-16T17:59:15,495 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39733 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-16T17:59:15,495 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39733 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-16T17:59:15,495 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39733 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-16T17:59:15,495 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39733 
{}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-16T17:59:15,496 INFO [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-2 {event_type=RS_FLUSH_REGIONS, pid=164}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=33.54 KB at sequenceid=193 (bloomFilter=true), to=hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/7be8b4dcb0e9f81dbd68149495fe8709/.tmp/B/b904dccbea474fa080b7b585ad011a0e 2024-12-16T17:59:15,496 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39733 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-16T17:59:15,496 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39733 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-16T17:59:15,496 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39733 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-16T17:59:15,497 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39733 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-16T17:59:15,497 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39733 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-16T17:59:15,497 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39733 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-16T17:59:15,498 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39733 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-16T17:59:15,498 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39733 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-16T17:59:15,498 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39733 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-16T17:59:15,498 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39733 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-16T17:59:15,499 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39733 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-16T17:59:15,499 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39733 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-16T17:59:15,499 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39733 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-16T17:59:15,499 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39733 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-16T17:59:15,500 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39733 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-16T17:59:15,500 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39733 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-16T17:59:15,500 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39733 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-16T17:59:15,501 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39733 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-16T17:59:15,501 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39733 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-16T17:59:15,501 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39733 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-16T17:59:15,501 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39733 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-16T17:59:15,502 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39733 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-16T17:59:15,502 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39733 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-16T17:59:15,502 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-2 {event_type=RS_FLUSH_REGIONS, pid=164}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/7be8b4dcb0e9f81dbd68149495fe8709/.tmp/C/1c0ea7523e2b471fa7e5d4802e1cba8e is 50, key is test_row_1/C:col10/1734371953470/Put/seqid=0 2024-12-16T17:59:15,502 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39733 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 
2024-12-16T17:59:15,504 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39733 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-16T17:59:15,505 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39733 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-16T17:59:15,506 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39733 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-16T17:59:15,507 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39733 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-16T17:59:15,507 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39733 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-16T17:59:15,508 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39733 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-16T17:59:15,508 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39733 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-16T17:59:15,508 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39733 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-16T17:59:15,509 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39733 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-16T17:59:15,509 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39733 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-16T17:59:15,509 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39733 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-16T17:59:15,509 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39733 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-16T17:59:15,509 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39733 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-16T17:59:15,510 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39733 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 
2024-12-16T17:59:15,510 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39733 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-16T17:59:15,510 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39733 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-16T17:59:15,510 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39733 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-16T17:59:15,511 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39733 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-16T17:59:15,511 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39733 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-16T17:59:15,511 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39733 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-16T17:59:15,511 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39733 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-16T17:59:15,511 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39733 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-16T17:59:15,512 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39733 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-16T17:59:15,512 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39733 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-16T17:59:15,512 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39733 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-16T17:59:15,513 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39733 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-16T17:59:15,513 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39733 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-16T17:59:15,513 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41817 is added to blk_1073742420_1596 (size=9757) 2024-12-16T17:59:15,513 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39733 
{}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-16T17:59:15,514 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39733 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-16T17:59:15,514 INFO [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-2 {event_type=RS_FLUSH_REGIONS, pid=164}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=33.54 KB at sequenceid=193 (bloomFilter=true), to=hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/7be8b4dcb0e9f81dbd68149495fe8709/.tmp/C/1c0ea7523e2b471fa7e5d4802e1cba8e 2024-12-16T17:59:15,514 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39733 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-16T17:59:15,515 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39733 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-16T17:59:15,515 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39733 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-16T17:59:15,515 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39733 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-16T17:59:15,516 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39733 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-16T17:59:15,516 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39733 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-16T17:59:15,516 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39733 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-16T17:59:15,517 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39733 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-16T17:59:15,517 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39733 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-16T17:59:15,517 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39733 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-16T17:59:15,518 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39733 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-16T17:59:15,518 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39733 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-16T17:59:15,518 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39733 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-16T17:59:15,518 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-2 {event_type=RS_FLUSH_REGIONS, pid=164}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/7be8b4dcb0e9f81dbd68149495fe8709/.tmp/A/f76cf7a2aa7d492b891eb9a1e305f31b as hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/7be8b4dcb0e9f81dbd68149495fe8709/A/f76cf7a2aa7d492b891eb9a1e305f31b 2024-12-16T17:59:15,518 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39733 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-16T17:59:15,519 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39733 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-16T17:59:15,519 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39733 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-16T17:59:15,519 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39733 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-16T17:59:15,520 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39733 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-16T17:59:15,520 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39733 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-16T17:59:15,520 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39733 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-16T17:59:15,521 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39733 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-16T17:59:15,521 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39733 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-16T17:59:15,521 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39733 {}] storefiletracker.StoreFileTrackerFactory(122): 
instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-16T17:59:15,522 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39733 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-16T17:59:15,522 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39733 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-16T17:59:15,522 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39733 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-16T17:59:15,522 INFO [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-2 {event_type=RS_FLUSH_REGIONS, pid=164}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/7be8b4dcb0e9f81dbd68149495fe8709/A/f76cf7a2aa7d492b891eb9a1e305f31b, entries=100, sequenceid=193, filesize=21.9 K 2024-12-16T17:59:15,522 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39733 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-16T17:59:15,523 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-2 {event_type=RS_FLUSH_REGIONS, pid=164}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/7be8b4dcb0e9f81dbd68149495fe8709/.tmp/B/b904dccbea474fa080b7b585ad011a0e as hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/7be8b4dcb0e9f81dbd68149495fe8709/B/b904dccbea474fa080b7b585ad011a0e 2024-12-16T17:59:15,523 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39733 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-16T17:59:15,524 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39733 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-16T17:59:15,524 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39733 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-16T17:59:15,524 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39733 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-16T17:59:15,524 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39733 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-16T17:59:15,524 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39733 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-16T17:59:15,525 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39733 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-16T17:59:15,525 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39733 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-16T17:59:15,526 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39733 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-16T17:59:15,526 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39733 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-16T17:59:15,526 INFO [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-2 {event_type=RS_FLUSH_REGIONS, pid=164}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/7be8b4dcb0e9f81dbd68149495fe8709/B/b904dccbea474fa080b7b585ad011a0e, entries=100, sequenceid=193, filesize=9.5 K 2024-12-16T17:59:15,527 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39733 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-16T17:59:15,527 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-2 {event_type=RS_FLUSH_REGIONS, pid=164}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/7be8b4dcb0e9f81dbd68149495fe8709/.tmp/C/1c0ea7523e2b471fa7e5d4802e1cba8e as hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/7be8b4dcb0e9f81dbd68149495fe8709/C/1c0ea7523e2b471fa7e5d4802e1cba8e 2024-12-16T17:59:15,527 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39733 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-16T17:59:15,527 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39733 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-16T17:59:15,528 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39733 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-16T17:59:15,528 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39733 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-16T17:59:15,528 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39733 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 
2024-12-16T17:59:15,528 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39733 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-16T17:59:15,528 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39733 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-16T17:59:15,529 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39733 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-16T17:59:15,529 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39733 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-16T17:59:15,529 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39733 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-16T17:59:15,529 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39733 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-16T17:59:15,529 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39733 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-16T17:59:15,529 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39733 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-16T17:59:15,529 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39733 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-16T17:59:15,530 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39733 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-16T17:59:15,530 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39733 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-16T17:59:15,530 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39733 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-16T17:59:15,530 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39733 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-16T17:59:15,531 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39733 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 
2024-12-16T17:59:15,531 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39733 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-16T17:59:15,531 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39733 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-16T17:59:15,531 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39733 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-16T17:59:15,531 INFO [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-2 {event_type=RS_FLUSH_REGIONS, pid=164}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/7be8b4dcb0e9f81dbd68149495fe8709/C/1c0ea7523e2b471fa7e5d4802e1cba8e, entries=100, sequenceid=193, filesize=9.5 K 2024-12-16T17:59:15,531 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39733 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-16T17:59:15,532 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39733 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-16T17:59:15,532 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39733 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-16T17:59:15,532 INFO [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-2 {event_type=RS_FLUSH_REGIONS, pid=164}] regionserver.HRegion(3040): Finished flush of dataSize ~100.63 KB/103050, heapSize ~264.38 KB/270720, currentSize=0 B/0 for 7be8b4dcb0e9f81dbd68149495fe8709 in 110ms, sequenceid=193, compaction requested=true 2024-12-16T17:59:15,532 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39733 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-16T17:59:15,532 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-2 {event_type=RS_FLUSH_REGIONS, pid=164}] regionserver.HRegion(2538): Flush status journal for 7be8b4dcb0e9f81dbd68149495fe8709: 2024-12-16T17:59:15,532 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-2 {event_type=RS_FLUSH_REGIONS, pid=164}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1734371935330.7be8b4dcb0e9f81dbd68149495fe8709. 
2024-12-16T17:59:15,532 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-2 {event_type=RS_FLUSH_REGIONS, pid=164}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=164 2024-12-16T17:59:15,532 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38367 {}] master.HMaster(4106): Remote procedure done, pid=164 2024-12-16T17:59:15,532 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39733 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-16T17:59:15,533 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39733 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-16T17:59:15,533 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39733 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-16T17:59:15,534 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39733 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-16T17:59:15,534 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39733 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-16T17:59:15,534 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=164, resume processing ppid=163 2024-12-16T17:59:15,535 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=164, ppid=163, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 262 msec 2024-12-16T17:59:15,535 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39733 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-16T17:59:15,536 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=163, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=163, table=TestAcidGuarantees in 267 msec 2024-12-16T17:59:15,536 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39733 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-16T17:59:15,537 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39733 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-16T17:59:15,537 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39733 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-16T17:59:15,538 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39733 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-16T17:59:15,538 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39733 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-16T17:59:15,538 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39733 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-16T17:59:15,538 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39733 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-16T17:59:15,538 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39733 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-16T17:59:15,539 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39733 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-16T17:59:15,539 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39733 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-16T17:59:15,539 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39733 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-16T17:59:15,539 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39733 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-16T17:59:15,539 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39733 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-16T17:59:15,539 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39733 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-16T17:59:15,540 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39733 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-16T17:59:15,540 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39733 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-16T17:59:15,540 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39733 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-16T17:59:15,540 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39733 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-16T17:59:15,540 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39733 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-16T17:59:15,540 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39733 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-16T17:59:15,540 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39733 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-16T17:59:15,540 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39733 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-16T17:59:15,541 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39733 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-16T17:59:15,541 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39733 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-16T17:59:15,541 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39733 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-16T17:59:15,541 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39733 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-16T17:59:15,541 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39733 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-16T17:59:15,541 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39733 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-16T17:59:15,542 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39733 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-16T17:59:15,542 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39733 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-16T17:59:15,542 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39733 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-16T17:59:15,542 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39733 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-16T17:59:15,542 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39733 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-16T17:59:15,542 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39733 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-16T17:59:15,542 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39733 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-16T17:59:15,542 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39733 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-16T17:59:15,543 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39733 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-16T17:59:15,543 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39733 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-16T17:59:15,543 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39733 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-16T17:59:15,543 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39733 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-16T17:59:15,543 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39733 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-16T17:59:15,543 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39733 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-16T17:59:15,544 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39733 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-16T17:59:15,544 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39733 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-16T17:59:15,544 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39733 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-16T17:59:15,544 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39733 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-16T17:59:15,544 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39733 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-16T17:59:15,544 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39733 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-16T17:59:15,544 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39733 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-16T17:59:15,545 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39733 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-16T17:59:15,545 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39733 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-16T17:59:15,545 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39733 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-16T17:59:15,545 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39733 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-16T17:59:15,545 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39733 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-16T17:59:15,545 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39733 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-16T17:59:15,545 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39733 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-16T17:59:15,546 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39733 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-16T17:59:15,546 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39733 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-16T17:59:15,546 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39733 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-16T17:59:15,546 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39733 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-16T17:59:15,546 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39733 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-16T17:59:15,546 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39733 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-16T17:59:15,546 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39733 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-16T17:59:15,547 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39733 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-16T17:59:15,547 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39733 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-16T17:59:15,547 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39733 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-16T17:59:15,547 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39733 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-16T17:59:15,547 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39733 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-16T17:59:15,547 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39733 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-16T17:59:15,548 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39733 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-16T17:59:15,548 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39733 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-16T17:59:15,548 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39733 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-16T17:59:15,548 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39733 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-16T17:59:15,548 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39733 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-16T17:59:15,548 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39733 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-16T17:59:15,548 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39733 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-16T17:59:15,549 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39733 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-16T17:59:15,549 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39733 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-16T17:59:15,549 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39733 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-16T17:59:15,549 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39733 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-16T17:59:15,549 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39733 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-16T17:59:15,549 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39733 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-16T17:59:15,549 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39733 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-16T17:59:15,549 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39733 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-16T17:59:15,550 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39733 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-16T17:59:15,550 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39733 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-16T17:59:15,550 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39733 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-16T17:59:15,550 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39733 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-16T17:59:15,550 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39733 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-16T17:59:15,550 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39733 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-16T17:59:15,550 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39733 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-16T17:59:15,551 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39733 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-16T17:59:15,551 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39733 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-16T17:59:15,551 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39733 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-16T17:59:15,551 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39733 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-16T17:59:15,551 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39733 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-16T17:59:15,551 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39733 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-16T17:59:15,552 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39733 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-16T17:59:15,552 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39733 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-16T17:59:15,552 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39733 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-16T17:59:15,552 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39733 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-16T17:59:15,552 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39733 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-16T17:59:15,552 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39733 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-16T17:59:15,552 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39733 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-16T17:59:15,552 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39733 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-16T17:59:15,553 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39733 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-16T17:59:15,553 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39733 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-16T17:59:15,553 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39733 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-16T17:59:15,553 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39733 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-16T17:59:15,553 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39733 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-16T17:59:15,554 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39733 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-16T17:59:15,554 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39733 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-16T17:59:15,554 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39733 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-16T17:59:15,554 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39733 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-16T17:59:15,554 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39733 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-16T17:59:15,555 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39733 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-16T17:59:15,555 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39733 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-16T17:59:15,555 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39733 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-16T17:59:15,555 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39733 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-16T17:59:15,555 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39733 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-16T17:59:15,555 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39733 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-16T17:59:15,555 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39733 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-16T17:59:15,555 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39733 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-16T17:59:15,556 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39733 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-16T17:59:15,556 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39733 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-16T17:59:15,556 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39733 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-16T17:59:15,556 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39733 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-16T17:59:15,557 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39733 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-16T17:59:15,557 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39733 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-16T17:59:15,557 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39733 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-16T17:59:15,557 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39733 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-16T17:59:15,557 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39733 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-16T17:59:15,557 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39733 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-16T17:59:15,558 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39733 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-16T17:59:15,558 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39733 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-16T17:59:15,558 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39733 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-16T17:59:15,559 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39733 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-16T17:59:15,559 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39733 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-16T17:59:15,559 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39733 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-16T17:59:15,560 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39733 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-16T17:59:15,560 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39733 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-16T17:59:15,561 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39733 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-16T17:59:15,562 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39733 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-16T17:59:15,562 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39733 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-16T17:59:15,562 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39733 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-16T17:59:15,562 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39733 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-16T17:59:15,563 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39733 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-16T17:59:15,563 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39733 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-16T17:59:15,563 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39733 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-16T17:59:15,563 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39733 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-16T17:59:15,564 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39733 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-16T17:59:15,564 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39733 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-16T17:59:15,564 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39733 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-16T17:59:15,564 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39733 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-16T17:59:15,565 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39733 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-16T17:59:15,565 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39733 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-16T17:59:15,565 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39733 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-16T17:59:15,565 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39733 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-16T17:59:15,565 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39733 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-16T17:59:15,565 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39733 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-16T17:59:15,566 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39733 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-16T17:59:15,566 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39733 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-16T17:59:15,566 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39733 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-16T17:59:15,566 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39733 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-16T17:59:15,566 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39733 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-16T17:59:15,567 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39733 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-16T17:59:15,567 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39733 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-16T17:59:15,567 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39733 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-16T17:59:15,568 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39733 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-16T17:59:15,568 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39733 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-16T17:59:15,568 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39733 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-16T17:59:15,568 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39733 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-16T17:59:15,568 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39733 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-16T17:59:15,568 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39733 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-16T17:59:15,568 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39733 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-16T17:59:15,568 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39733 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-16T17:59:15,569 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39733 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-16T17:59:15,569 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39733 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-16T17:59:15,569 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39733 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-16T17:59:15,569 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39733 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-16T17:59:15,569 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39733 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-16T17:59:15,569 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39733 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-16T17:59:15,569 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39733 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-16T17:59:15,569 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39733 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-16T17:59:15,570 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39733 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-16T17:59:15,570 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39733 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-16T17:59:15,570 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39733 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-16T17:59:15,570 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39733 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-16T17:59:15,570 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39733 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-16T17:59:15,570 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39733 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-16T17:59:15,570 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39733 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-16T17:59:15,570 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39733 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-16T17:59:15,570 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39733 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-16T17:59:15,571 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39733 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-16T17:59:15,571 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39733 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-16T17:59:15,571 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39733 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-16T17:59:15,571 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39733 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-16T17:59:15,571 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39733 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-16T17:59:15,571 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39733 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-16T17:59:15,571 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39733 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-16T17:59:15,572 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39733 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-12-16T17:59:15,572 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39733 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-12-16T17:59:15,572 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38367 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=163
2024-12-16T17:59:15,572 INFO [Thread-2436 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 163 completed
2024-12-16T17:59:15,572 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39733 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-12-16T17:59:15,572 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39733 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-12-16T17:59:15,572 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39733 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-12-16T17:59:15,572 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39733 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-12-16T17:59:15,573 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39733 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-12-16T17:59:15,573 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39733 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-12-16T17:59:15,573 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39733 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-12-16T17:59:15,573 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39733 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-12-16T17:59:15,573 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38367 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees
2024-12-16T17:59:15,573 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39733 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-12-16T17:59:15,574 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39733 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
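
The records above show a client-driven flush completing and the next one starting: the HBaseAdmin future reports "Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 163 completed", and the jenkins client immediately asks the master (port 38367) to flush TestAcidGuarantees again. On the client side this corresponds to a blocking Admin.flush(TableName) call; a minimal sketch follows (connection setup is assumed, only the table name comes from the log):

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;

    public class FlushTableExample {
      public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();
        try (Connection connection = ConnectionFactory.createConnection(conf);
             Admin admin = connection.getAdmin()) {
          // Asks the master to flush every region of the table and waits for the
          // resulting flush procedure to finish, which is what produces the
          // "Operation: FLUSH ... completed" record above.
          admin.flush(TableName.valueOf("TestAcidGuarantees"));
        }
      }
    }

Admin.flush returns only once the master reports the procedure done, which is why the master handler keeps logging "Checking to see if procedure is done" for each pending pid in the surrounding records.
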
2024-12-16T17:59:15,574 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39733 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-12-16T17:59:15,574 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39733 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-12-16T17:59:15,574 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38367 {}] procedure2.ProcedureExecutor(1098): Stored pid=165, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=165, table=TestAcidGuarantees
2024-12-16T17:59:15,574 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39733 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-12-16T17:59:15,574 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39733 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-12-16T17:59:15,574 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39733 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-12-16T17:59:15,574 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39733 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-12-16T17:59:15,574 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39733 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-12-16T17:59:15,575 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39733 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-12-16T17:59:15,575 INFO [PEWorker-1 {}] procedure.FlushTableProcedure(91): pid=165, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=165, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE
2024-12-16T17:59:15,575 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39733 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-12-16T17:59:15,575 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39733 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-12-16T17:59:15,575 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39733 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-12-16T17:59:15,575 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39733 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
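
Nearly every other record in this stretch is an RPC handler resolving the configured store file tracker and instantiating the default implementation, hence the dense repetition of the DefaultStoreFileTracker message around the FlushTableProcedure records. A different tracker is normally selected per table through the table descriptor; the sketch below assumes the hbase.store.file-tracker.impl property of the store file tracking feature in recent HBase releases, and the table and family names are purely illustrative:

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;
    import org.apache.hadoop.hbase.client.TableDescriptor;
    import org.apache.hadoop.hbase.client.TableDescriptorBuilder;

    public class StoreFileTrackerConfigExample {
      public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();
        try (Connection connection = ConnectionFactory.createConnection(conf);
             Admin admin = connection.getAdmin()) {
          TableDescriptor desc = TableDescriptorBuilder
              .newBuilder(TableName.valueOf("SftDemo"))          // hypothetical table
              .setColumnFamily(ColumnFamilyDescriptorBuilder.of("A"))
              // "DEFAULT" is the tracker being instantiated throughout this log;
              // "FILE" would switch this table to the file-based tracker.
              .setValue("hbase.store.file-tracker.impl", "DEFAULT")
              .build();
          admin.createTable(desc);
        }
      }
    }

With the DEFAULT tracker, as in this run, store files are tracked through the file system layout alone, so the property only changes which implementation these debug lines name.
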
2024-12-16T17:59:15,575 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39733 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-12-16T17:59:15,575 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39733 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-12-16T17:59:15,576 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38367 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=165
2024-12-16T17:59:15,576 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39733 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-12-16T17:59:15,576 INFO [PEWorker-1 {}] procedure.FlushTableProcedure(91): pid=165, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=165, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS
2024-12-16T17:59:15,576 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=166, ppid=165, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}]
2024-12-16T17:59:15,576 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39733 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-12-16T17:59:15,576 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39733 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-12-16T17:59:15,576 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39733 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-12-16T17:59:15,576 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39733 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-12-16T17:59:15,577 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39733 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-12-16T17:59:15,577 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39733 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-12-16T17:59:15,577 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39733 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-12-16T17:59:15,577 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39733 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-12-16T17:59:15,577 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39733 {}] storefiletracker.StoreFileTrackerFactory(122):
instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-16T17:59:15,577 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39733 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-16T17:59:15,577 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39733 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-16T17:59:15,577 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39733 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-16T17:59:15,578 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39733 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-16T17:59:15,578 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39733 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-16T17:59:15,578 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39733 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-16T17:59:15,578 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39733 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-16T17:59:15,578 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39733 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-16T17:59:15,578 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39733 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-16T17:59:15,578 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39733 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-16T17:59:15,579 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39733 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-16T17:59:15,579 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39733 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-16T17:59:15,579 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39733 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-16T17:59:15,579 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39733 {}] storefiletracker.StoreFileTrackerFactory(122): 
instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-16T17:59:15,579 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39733 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-16T17:59:15,579 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39733 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-16T17:59:15,579 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39733 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-16T17:59:15,579 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39733 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-16T17:59:15,580 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39733 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-16T17:59:15,580 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39733 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-16T17:59:15,580 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39733 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-16T17:59:15,580 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39733 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-16T17:59:15,580 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39733 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-16T17:59:15,580 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39733 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-16T17:59:15,581 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39733 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-16T17:59:15,581 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39733 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-16T17:59:15,581 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39733 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-16T17:59:15,581 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39733 {}] storefiletracker.StoreFileTrackerFactory(122): 
instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-16T17:59:15,582 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39733 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-16T17:59:15,582 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39733 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-16T17:59:15,582 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39733 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-16T17:59:15,582 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39733 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-16T17:59:15,583 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39733 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-16T17:59:15,583 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39733 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-16T17:59:15,583 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39733 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-16T17:59:15,583 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39733 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-16T17:59:15,584 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39733 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-16T17:59:15,584 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39733 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-16T17:59:15,585 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39733 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-16T17:59:15,586 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39733 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-16T17:59:15,587 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39733 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-16T17:59:15,587 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39733 {}] storefiletracker.StoreFileTrackerFactory(122): 
instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-12-16T17:59:15,588 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39733 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-12-16T17:59:15,588 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39733 {}] regionserver.HRegion(8581): Flush requested on 7be8b4dcb0e9f81dbd68149495fe8709
2024-12-16T17:59:15,588 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 7be8b4dcb0e9f81dbd68149495fe8709 3/3 column families, dataSize=53.67 KB heapSize=141.38 KB
2024-12-16T17:59:15,588 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 7be8b4dcb0e9f81dbd68149495fe8709, store=A
2024-12-16T17:59:15,588 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null
2024-12-16T17:59:15,588 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 7be8b4dcb0e9f81dbd68149495fe8709, store=B
2024-12-16T17:59:15,588 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null
2024-12-16T17:59:15,589 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39733 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-12-16T17:59:15,589 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 7be8b4dcb0e9f81dbd68149495fe8709, store=C
2024-12-16T17:59:15,589 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null
2024-12-16T17:59:15,589 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39733 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-12-16T17:59:15,590 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39733 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-12-16T17:59:15,590 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39733 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-12-16T17:59:15,591 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39733 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-12-16T17:59:15,597 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241216e20c943bb02946589cce3800d0721b99_7be8b4dcb0e9f81dbd68149495fe8709 is 50, key is test_row_0/A:col10/1734371955585/Put/seqid=0
2024-12-16T17:59:15,618 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41817 is added to blk_1073742421_1597 (size=19774)
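
The flush above is racing the writers: while MemStoreFlusher.0 is writing out the three column families, the handlers below begin rejecting mutations with RegionTooBusyException at a 512.0 K memstore limit. The blocking threshold is the region flush size multiplied by the block multiplier, so the figure here is consistent with a test-sized hbase.hregion.memstore.flush.size of 128 KB and the default hbase.hregion.memstore.block.multiplier of 4 (an assumption; the exact test settings are not in this excerpt). A small sketch of the arithmetic:

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;

    public class MemStoreBlockingLimit {
      public static void main(String[] args) {
        Configuration conf = HBaseConfiguration.create();
        // Assumed test-style values; the production default flush size is 128 MB.
        conf.setLong("hbase.hregion.memstore.flush.size", 128L * 1024);
        conf.setInt("hbase.hregion.memstore.block.multiplier", 4);

        long flushSize = conf.getLong("hbase.hregion.memstore.flush.size", 128L * 1024 * 1024);
        int blockMultiplier = conf.getInt("hbase.hregion.memstore.block.multiplier", 4);

        // Once a region's memstore grows past flushSize * blockMultiplier,
        // further writes are rejected with RegionTooBusyException until the
        // flush catches up, as in the warnings that follow.
        long blockingLimit = flushSize * blockMultiplier;
        System.out.println("Blocking memstore limit: " + blockingLimit + " bytes"); // 524288 = 512 KB
      }
    }

Clients normally retry these rejected mutations after a backoff, which is why the same callers reappear in later records once the flush completes.
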
2024-12-16T17:59:15,619 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39733 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7be8b4dcb0e9f81dbd68149495fe8709, server=3609ad07831c,39733,1734371789085
    at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?]
    at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?]
    at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT]
2024-12-16T17:59:15,620 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39733 {}] ipc.CallRunner(138): callId: 126 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:47466 deadline: 1734372015614, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7be8b4dcb0e9f81dbd68149495fe8709, server=3609ad07831c,39733,1734371789085
2024-12-16T17:59:15,620 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39733 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7be8b4dcb0e9f81dbd68149495fe8709, server=3609ad07831c,39733,1734371789085
    at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?]
    at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?]
    at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT]
2024-12-16T17:59:15,620 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39733 {}] ipc.CallRunner(138): callId: 103 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:47476 deadline: 1734372015616, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7be8b4dcb0e9f81dbd68149495fe8709, server=3609ad07831c,39733,1734371789085
2024-12-16T17:59:15,624 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39733 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7be8b4dcb0e9f81dbd68149495fe8709, server=3609ad07831c,39733,1734371789085
    at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?]
    at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?]
    at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT]
2024-12-16T17:59:15,624 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39733 {}] ipc.CallRunner(138): callId: 105 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:47440 deadline: 1734372015619, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7be8b4dcb0e9f81dbd68149495fe8709, server=3609ad07831c,39733,1734371789085
2024-12-16T17:59:15,624 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39733 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7be8b4dcb0e9f81dbd68149495fe8709, server=3609ad07831c,39733,1734371789085
    at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?]
    at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?]
    at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT]
2024-12-16T17:59:15,624 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39733 {}] ipc.CallRunner(138): callId: 109 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:47488 deadline: 1734372015619, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7be8b4dcb0e9f81dbd68149495fe8709, server=3609ad07831c,39733,1734371789085
2024-12-16T17:59:15,624 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39733 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7be8b4dcb0e9f81dbd68149495fe8709, server=3609ad07831c,39733,1734371789085
    at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?]
    at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?]
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-16T17:59:15,625 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39733 {}] ipc.CallRunner(138): callId: 101 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:47450 deadline: 1734372015619, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7be8b4dcb0e9f81dbd68149495fe8709, server=3609ad07831c,39733,1734371789085 2024-12-16T17:59:15,677 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38367 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=165 2024-12-16T17:59:15,722 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39733 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7be8b4dcb0e9f81dbd68149495fe8709, server=3609ad07831c,39733,1734371789085 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-16T17:59:15,722 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39733 {}] ipc.CallRunner(138): callId: 128 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:47466 deadline: 1734372015720, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7be8b4dcb0e9f81dbd68149495fe8709, server=3609ad07831c,39733,1734371789085 2024-12-16T17:59:15,723 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39733 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7be8b4dcb0e9f81dbd68149495fe8709, server=3609ad07831c,39733,1734371789085 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-16T17:59:15,723 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39733 {}] ipc.CallRunner(138): callId: 105 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:47476 deadline: 1734372015721, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7be8b4dcb0e9f81dbd68149495fe8709, server=3609ad07831c,39733,1734371789085 2024-12-16T17:59:15,726 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39733 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7be8b4dcb0e9f81dbd68149495fe8709, server=3609ad07831c,39733,1734371789085 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-16T17:59:15,726 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39733 {}] ipc.CallRunner(138): callId: 107 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:47440 deadline: 1734372015725, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7be8b4dcb0e9f81dbd68149495fe8709, server=3609ad07831c,39733,1734371789085 2024-12-16T17:59:15,727 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39733 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7be8b4dcb0e9f81dbd68149495fe8709, server=3609ad07831c,39733,1734371789085 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-16T17:59:15,727 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39733 {}] ipc.CallRunner(138): callId: 103 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:47450 deadline: 1734372015726, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7be8b4dcb0e9f81dbd68149495fe8709, server=3609ad07831c,39733,1734371789085 2024-12-16T17:59:15,728 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39733 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7be8b4dcb0e9f81dbd68149495fe8709, server=3609ad07831c,39733,1734371789085 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-16T17:59:15,728 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39733 {}] ipc.CallRunner(138): callId: 111 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:47488 deadline: 1734372015726, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7be8b4dcb0e9f81dbd68149495fe8709, server=3609ad07831c,39733,1734371789085 2024-12-16T17:59:15,728 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 3609ad07831c,39733,1734371789085 2024-12-16T17:59:15,728 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=39733 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=166 2024-12-16T17:59:15,728 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-0 {event_type=RS_FLUSH_REGIONS, pid=166}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1734371935330.7be8b4dcb0e9f81dbd68149495fe8709. 2024-12-16T17:59:15,728 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-0 {event_type=RS_FLUSH_REGIONS, pid=166}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1734371935330.7be8b4dcb0e9f81dbd68149495fe8709. as already flushing 2024-12-16T17:59:15,728 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-0 {event_type=RS_FLUSH_REGIONS, pid=166}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1734371935330.7be8b4dcb0e9f81dbd68149495fe8709. 2024-12-16T17:59:15,729 ERROR [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-0 {event_type=RS_FLUSH_REGIONS, pid=166}] handler.RSProcedureHandler(58): pid=166 java.io.IOException: Unable to complete flush {ENCODED => 7be8b4dcb0e9f81dbd68149495fe8709, NAME => 'TestAcidGuarantees,,1734371935330.7be8b4dcb0e9f81dbd68149495fe8709.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
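[Annotation, not part of the captured log] The repeated RegionTooBusyException warnings above come from HRegion.checkResources rejecting writes while the 7be8b4dcb0e9f81dbd68149495fe8709 memstores sit over the 512.0 K blocking limit, faster than MemStoreFlusher.0 can drain them. Below is a minimal illustrative sketch, assuming the standard HBase client API, of how a writer might back off and retry such transient rejections; the table, row, and column names are copied from the entries above, while the class name, retry count, and backoff policy are assumptions and not taken from the TestAcidGuarantees source.

// Illustrative sketch only; not part of the captured log or the test source.
// Writes the same cell the log shows (TestAcidGuarantees, row test_row_0, A:col10)
// and backs off when the Put is rejected while the memstore is over its blocking limit.
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.RegionTooBusyException;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.util.Bytes;

public class BackoffWriter {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    try (Connection conn = ConnectionFactory.createConnection(conf);
         Table table = conn.getTable(TableName.valueOf("TestAcidGuarantees"))) {
      Put put = new Put(Bytes.toBytes("test_row_0"));
      put.addColumn(Bytes.toBytes("A"), Bytes.toBytes("col10"), Bytes.toBytes("value"));
      long backoffMs = 100L;                // starting backoff (assumption)
      for (int attempt = 1; attempt <= 5; attempt++) {
        try {
          table.put(put);                   // may be rejected while the memstore is blocked
          break;                            // success: stop retrying
        } catch (RegionTooBusyException e) {
          // Same condition the log reports: "Over memstore limit=512.0 K".
          Thread.sleep(backoffMs);          // give the MemStoreFlusher time to drain the region
          backoffMs *= 2;                   // simple exponential backoff (assumption)
        }
      }
    }
  }
}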
2024-12-16T17:59:15,729 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-0 {event_type=RS_FLUSH_REGIONS, pid=166}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=166 java.io.IOException: Unable to complete flush {ENCODED => 7be8b4dcb0e9f81dbd68149495fe8709, NAME => 'TestAcidGuarantees,,1734371935330.7be8b4dcb0e9f81dbd68149495fe8709.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-16T17:59:15,729 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38367 {}] master.HMaster(4114): Remote procedure failed, pid=166 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 7be8b4dcb0e9f81dbd68149495fe8709, NAME => 'TestAcidGuarantees,,1734371935330.7be8b4dcb0e9f81dbd68149495fe8709.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 7be8b4dcb0e9f81dbd68149495fe8709, NAME => 'TestAcidGuarantees,,1734371935330.7be8b4dcb0e9f81dbd68149495fe8709.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-16T17:59:15,877 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38367 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=165 2024-12-16T17:59:15,880 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 3609ad07831c,39733,1734371789085 2024-12-16T17:59:15,880 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=39733 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=166 2024-12-16T17:59:15,881 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-1 {event_type=RS_FLUSH_REGIONS, pid=166}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1734371935330.7be8b4dcb0e9f81dbd68149495fe8709. 2024-12-16T17:59:15,881 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-1 {event_type=RS_FLUSH_REGIONS, pid=166}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1734371935330.7be8b4dcb0e9f81dbd68149495fe8709. as already flushing 2024-12-16T17:59:15,881 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-1 {event_type=RS_FLUSH_REGIONS, pid=166}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1734371935330.7be8b4dcb0e9f81dbd68149495fe8709. 2024-12-16T17:59:15,881 ERROR [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-1 {event_type=RS_FLUSH_REGIONS, pid=166}] handler.RSProcedureHandler(58): pid=166 java.io.IOException: Unable to complete flush {ENCODED => 7be8b4dcb0e9f81dbd68149495fe8709, NAME => 'TestAcidGuarantees,,1734371935330.7be8b4dcb0e9f81dbd68149495fe8709.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-16T17:59:15,881 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-1 {event_type=RS_FLUSH_REGIONS, pid=166}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=166 java.io.IOException: Unable to complete flush {ENCODED => 7be8b4dcb0e9f81dbd68149495fe8709, NAME => 'TestAcidGuarantees,,1734371935330.7be8b4dcb0e9f81dbd68149495fe8709.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-16T17:59:15,881 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38367 {}] master.HMaster(4114): Remote procedure failed, pid=166 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 7be8b4dcb0e9f81dbd68149495fe8709, NAME => 'TestAcidGuarantees,,1734371935330.7be8b4dcb0e9f81dbd68149495fe8709.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 7be8b4dcb0e9f81dbd68149495fe8709, NAME => 'TestAcidGuarantees,,1734371935330.7be8b4dcb0e9f81dbd68149495fe8709.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-16T17:59:15,924 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39733 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7be8b4dcb0e9f81dbd68149495fe8709, server=3609ad07831c,39733,1734371789085 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-16T17:59:15,925 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39733 {}] ipc.CallRunner(138): callId: 130 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:47466 deadline: 1734372015923, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7be8b4dcb0e9f81dbd68149495fe8709, server=3609ad07831c,39733,1734371789085 2024-12-16T17:59:15,926 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39733 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7be8b4dcb0e9f81dbd68149495fe8709, server=3609ad07831c,39733,1734371789085 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-16T17:59:15,926 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39733 {}] ipc.CallRunner(138): callId: 107 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:47476 deadline: 1734372015924, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7be8b4dcb0e9f81dbd68149495fe8709, server=3609ad07831c,39733,1734371789085 2024-12-16T17:59:15,929 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39733 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7be8b4dcb0e9f81dbd68149495fe8709, server=3609ad07831c,39733,1734371789085 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-16T17:59:15,929 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39733 {}] ipc.CallRunner(138): callId: 109 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:47440 deadline: 1734372015928, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7be8b4dcb0e9f81dbd68149495fe8709, server=3609ad07831c,39733,1734371789085 2024-12-16T17:59:15,929 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39733 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7be8b4dcb0e9f81dbd68149495fe8709, server=3609ad07831c,39733,1734371789085 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-16T17:59:15,929 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39733 {}] ipc.CallRunner(138): callId: 113 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:47488 deadline: 1734372015928, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7be8b4dcb0e9f81dbd68149495fe8709, server=3609ad07831c,39733,1734371789085 2024-12-16T17:59:15,931 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39733 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7be8b4dcb0e9f81dbd68149495fe8709, server=3609ad07831c,39733,1734371789085 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-16T17:59:15,931 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39733 {}] ipc.CallRunner(138): callId: 105 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:47450 deadline: 1734372015929, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7be8b4dcb0e9f81dbd68149495fe8709, server=3609ad07831c,39733,1734371789085 2024-12-16T17:59:16,019 DEBUG [MemStoreFlusher.0 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-16T17:59:16,021 INFO [MemStoreFlusher.0 {}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241216e20c943bb02946589cce3800d0721b99_7be8b4dcb0e9f81dbd68149495fe8709 to hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241216e20c943bb02946589cce3800d0721b99_7be8b4dcb0e9f81dbd68149495fe8709 2024-12-16T17:59:16,022 DEBUG [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/7be8b4dcb0e9f81dbd68149495fe8709/.tmp/A/a7a24fea12af419cb4678647f565cef4, store: [table=TestAcidGuarantees family=A region=7be8b4dcb0e9f81dbd68149495fe8709] 2024-12-16T17:59:16,023 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/7be8b4dcb0e9f81dbd68149495fe8709/.tmp/A/a7a24fea12af419cb4678647f565cef4 is 175, key is test_row_0/A:col10/1734371955585/Put/seqid=0 2024-12-16T17:59:16,033 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 3609ad07831c,39733,1734371789085 2024-12-16T17:59:16,033 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=39733 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=166 2024-12-16T17:59:16,033 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-2 {event_type=RS_FLUSH_REGIONS, pid=166}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1734371935330.7be8b4dcb0e9f81dbd68149495fe8709. 2024-12-16T17:59:16,033 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-2 {event_type=RS_FLUSH_REGIONS, pid=166}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1734371935330.7be8b4dcb0e9f81dbd68149495fe8709. 
as already flushing 2024-12-16T17:59:16,033 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-2 {event_type=RS_FLUSH_REGIONS, pid=166}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1734371935330.7be8b4dcb0e9f81dbd68149495fe8709. 2024-12-16T17:59:16,033 ERROR [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-2 {event_type=RS_FLUSH_REGIONS, pid=166}] handler.RSProcedureHandler(58): pid=166 java.io.IOException: Unable to complete flush {ENCODED => 7be8b4dcb0e9f81dbd68149495fe8709, NAME => 'TestAcidGuarantees,,1734371935330.7be8b4dcb0e9f81dbd68149495fe8709.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-16T17:59:16,033 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-2 {event_type=RS_FLUSH_REGIONS, pid=166}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=166 java.io.IOException: Unable to complete flush {ENCODED => 7be8b4dcb0e9f81dbd68149495fe8709, NAME => 'TestAcidGuarantees,,1734371935330.7be8b4dcb0e9f81dbd68149495fe8709.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-16T17:59:16,034 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38367 {}] master.HMaster(4114): Remote procedure failed, pid=166 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 7be8b4dcb0e9f81dbd68149495fe8709, NAME => 'TestAcidGuarantees,,1734371935330.7be8b4dcb0e9f81dbd68149495fe8709.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] 
at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 7be8b4dcb0e9f81dbd68149495fe8709, NAME => 'TestAcidGuarantees,,1734371935330.7be8b4dcb0e9f81dbd68149495fe8709.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-16T17:59:16,035 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41817 is added to blk_1073742422_1598 (size=57033) 2024-12-16T17:59:16,178 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38367 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=165 2024-12-16T17:59:16,186 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 3609ad07831c,39733,1734371789085 2024-12-16T17:59:16,186 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=39733 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=166 2024-12-16T17:59:16,186 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-0 {event_type=RS_FLUSH_REGIONS, pid=166}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1734371935330.7be8b4dcb0e9f81dbd68149495fe8709. 2024-12-16T17:59:16,187 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-0 {event_type=RS_FLUSH_REGIONS, pid=166}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1734371935330.7be8b4dcb0e9f81dbd68149495fe8709. as already flushing 2024-12-16T17:59:16,187 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-0 {event_type=RS_FLUSH_REGIONS, pid=166}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1734371935330.7be8b4dcb0e9f81dbd68149495fe8709. 
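[Annotation, not part of the captured log] The pid=166 entries above show the master repeatedly redispatching FlushRegionCallable while the region still reports "already flushing", so each attempt returns the "Unable to complete flush" IOException until the in-progress flush finishes. Below is a minimal illustrative sketch, assuming the standard HBase Admin API, of requesting a table flush like the one being serviced here; only the table name is taken from the log, and whether this particular flush was issued through Admin.flush is an assumption.

// Illustrative sketch only; not part of the captured log or the test source.
// Asks the master to flush the regions of the table named in the entries above.
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

public class FlushRequester {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    try (Connection conn = ConnectionFactory.createConnection(conf);
         Admin admin = conn.getAdmin()) {
      // Triggers a flush of every region of the table; the master-side flush
      // procedure then drives per-region flush callables like pid=166 above.
      admin.flush(TableName.valueOf("TestAcidGuarantees"));
    }
  }
}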
2024-12-16T17:59:16,187 ERROR [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-0 {event_type=RS_FLUSH_REGIONS, pid=166}] handler.RSProcedureHandler(58): pid=166 java.io.IOException: Unable to complete flush {ENCODED => 7be8b4dcb0e9f81dbd68149495fe8709, NAME => 'TestAcidGuarantees,,1734371935330.7be8b4dcb0e9f81dbd68149495fe8709.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-16T17:59:16,187 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-0 {event_type=RS_FLUSH_REGIONS, pid=166}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=166 java.io.IOException: Unable to complete flush {ENCODED => 7be8b4dcb0e9f81dbd68149495fe8709, NAME => 'TestAcidGuarantees,,1734371935330.7be8b4dcb0e9f81dbd68149495fe8709.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-16T17:59:16,187 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38367 {}] master.HMaster(4114): Remote procedure failed, pid=166 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 7be8b4dcb0e9f81dbd68149495fe8709, NAME => 'TestAcidGuarantees,,1734371935330.7be8b4dcb0e9f81dbd68149495fe8709.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 7be8b4dcb0e9f81dbd68149495fe8709, NAME => 'TestAcidGuarantees,,1734371935330.7be8b4dcb0e9f81dbd68149495fe8709.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-16T17:59:16,229 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39733 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7be8b4dcb0e9f81dbd68149495fe8709, server=3609ad07831c,39733,1734371789085 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-16T17:59:16,229 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39733 {}] ipc.CallRunner(138): callId: 132 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:47466 deadline: 1734372016227, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7be8b4dcb0e9f81dbd68149495fe8709, server=3609ad07831c,39733,1734371789085 2024-12-16T17:59:16,230 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39733 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7be8b4dcb0e9f81dbd68149495fe8709, server=3609ad07831c,39733,1734371789085 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-16T17:59:16,230 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39733 {}] ipc.CallRunner(138): callId: 109 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:47476 deadline: 1734372016229, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7be8b4dcb0e9f81dbd68149495fe8709, server=3609ad07831c,39733,1734371789085 2024-12-16T17:59:16,232 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39733 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7be8b4dcb0e9f81dbd68149495fe8709, server=3609ad07831c,39733,1734371789085 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-16T17:59:16,232 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39733 {}] ipc.CallRunner(138): callId: 115 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:47488 deadline: 1734372016230, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7be8b4dcb0e9f81dbd68149495fe8709, server=3609ad07831c,39733,1734371789085 2024-12-16T17:59:16,233 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39733 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7be8b4dcb0e9f81dbd68149495fe8709, server=3609ad07831c,39733,1734371789085 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-16T17:59:16,233 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39733 {}] ipc.CallRunner(138): callId: 111 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:47440 deadline: 1734372016231, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7be8b4dcb0e9f81dbd68149495fe8709, server=3609ad07831c,39733,1734371789085 2024-12-16T17:59:16,234 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39733 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7be8b4dcb0e9f81dbd68149495fe8709, server=3609ad07831c,39733,1734371789085 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-16T17:59:16,234 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39733 {}] ipc.CallRunner(138): callId: 107 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:47450 deadline: 1734372016232, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7be8b4dcb0e9f81dbd68149495fe8709, server=3609ad07831c,39733,1734371789085 2024-12-16T17:59:16,338 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 3609ad07831c,39733,1734371789085 2024-12-16T17:59:16,339 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=39733 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=166 2024-12-16T17:59:16,339 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-1 {event_type=RS_FLUSH_REGIONS, pid=166}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1734371935330.7be8b4dcb0e9f81dbd68149495fe8709. 2024-12-16T17:59:16,339 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-1 {event_type=RS_FLUSH_REGIONS, pid=166}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1734371935330.7be8b4dcb0e9f81dbd68149495fe8709. as already flushing 2024-12-16T17:59:16,339 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-1 {event_type=RS_FLUSH_REGIONS, pid=166}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1734371935330.7be8b4dcb0e9f81dbd68149495fe8709. 2024-12-16T17:59:16,339 ERROR [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-1 {event_type=RS_FLUSH_REGIONS, pid=166}] handler.RSProcedureHandler(58): pid=166 java.io.IOException: Unable to complete flush {ENCODED => 7be8b4dcb0e9f81dbd68149495fe8709, NAME => 'TestAcidGuarantees,,1734371935330.7be8b4dcb0e9f81dbd68149495fe8709.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-16T17:59:16,339 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-1 {event_type=RS_FLUSH_REGIONS, pid=166}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=166 java.io.IOException: Unable to complete flush {ENCODED => 7be8b4dcb0e9f81dbd68149495fe8709, NAME => 'TestAcidGuarantees,,1734371935330.7be8b4dcb0e9f81dbd68149495fe8709.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-16T17:59:16,339 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38367 {}] master.HMaster(4114): Remote procedure failed, pid=166 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 7be8b4dcb0e9f81dbd68149495fe8709, NAME => 'TestAcidGuarantees,,1734371935330.7be8b4dcb0e9f81dbd68149495fe8709.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 7be8b4dcb0e9f81dbd68149495fe8709, NAME => 'TestAcidGuarantees,,1734371935330.7be8b4dcb0e9f81dbd68149495fe8709.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-16T17:59:16,437 INFO [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=204, memsize=17.9 K, hasBloomFilter=true, into tmp file hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/7be8b4dcb0e9f81dbd68149495fe8709/.tmp/A/a7a24fea12af419cb4678647f565cef4 2024-12-16T17:59:16,453 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/7be8b4dcb0e9f81dbd68149495fe8709/.tmp/B/b8851604047145f18936c5800bf36f5c is 50, key is test_row_0/B:col10/1734371955585/Put/seqid=0 2024-12-16T17:59:16,463 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41817 is added to blk_1073742423_1599 (size=12151) 2024-12-16T17:59:16,463 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=17.89 KB at sequenceid=204 (bloomFilter=true), to=hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/7be8b4dcb0e9f81dbd68149495fe8709/.tmp/B/b8851604047145f18936c5800bf36f5c 2024-12-16T17:59:16,471 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/7be8b4dcb0e9f81dbd68149495fe8709/.tmp/C/436fc035863f4e99834334dc9628efc4 is 50, key is test_row_0/C:col10/1734371955585/Put/seqid=0 2024-12-16T17:59:16,477 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41817 is added to blk_1073742424_1600 (size=12151) 2024-12-16T17:59:16,490 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 3609ad07831c,39733,1734371789085 
2024-12-16T17:59:16,491 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=39733 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=166 2024-12-16T17:59:16,491 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-2 {event_type=RS_FLUSH_REGIONS, pid=166}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1734371935330.7be8b4dcb0e9f81dbd68149495fe8709. 2024-12-16T17:59:16,491 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-2 {event_type=RS_FLUSH_REGIONS, pid=166}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1734371935330.7be8b4dcb0e9f81dbd68149495fe8709. as already flushing 2024-12-16T17:59:16,491 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-2 {event_type=RS_FLUSH_REGIONS, pid=166}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1734371935330.7be8b4dcb0e9f81dbd68149495fe8709. 2024-12-16T17:59:16,491 ERROR [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-2 {event_type=RS_FLUSH_REGIONS, pid=166}] handler.RSProcedureHandler(58): pid=166 java.io.IOException: Unable to complete flush {ENCODED => 7be8b4dcb0e9f81dbd68149495fe8709, NAME => 'TestAcidGuarantees,,1734371935330.7be8b4dcb0e9f81dbd68149495fe8709.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-16T17:59:16,491 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-2 {event_type=RS_FLUSH_REGIONS, pid=166}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=166 java.io.IOException: Unable to complete flush {ENCODED => 7be8b4dcb0e9f81dbd68149495fe8709, NAME => 'TestAcidGuarantees,,1734371935330.7be8b4dcb0e9f81dbd68149495fe8709.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-16T17:59:16,492 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38367 {}] master.HMaster(4114): Remote procedure failed, pid=166 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 7be8b4dcb0e9f81dbd68149495fe8709, NAME => 'TestAcidGuarantees,,1734371935330.7be8b4dcb0e9f81dbd68149495fe8709.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 7be8b4dcb0e9f81dbd68149495fe8709, NAME => 'TestAcidGuarantees,,1734371935330.7be8b4dcb0e9f81dbd68149495fe8709.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-16T17:59:16,643 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 3609ad07831c,39733,1734371789085 2024-12-16T17:59:16,643 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=39733 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=166 2024-12-16T17:59:16,643 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-0 {event_type=RS_FLUSH_REGIONS, pid=166}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1734371935330.7be8b4dcb0e9f81dbd68149495fe8709. 2024-12-16T17:59:16,643 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-0 {event_type=RS_FLUSH_REGIONS, pid=166}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1734371935330.7be8b4dcb0e9f81dbd68149495fe8709. 
as already flushing 2024-12-16T17:59:16,643 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-0 {event_type=RS_FLUSH_REGIONS, pid=166}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1734371935330.7be8b4dcb0e9f81dbd68149495fe8709. 2024-12-16T17:59:16,643 ERROR [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-0 {event_type=RS_FLUSH_REGIONS, pid=166}] handler.RSProcedureHandler(58): pid=166 java.io.IOException: Unable to complete flush {ENCODED => 7be8b4dcb0e9f81dbd68149495fe8709, NAME => 'TestAcidGuarantees,,1734371935330.7be8b4dcb0e9f81dbd68149495fe8709.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-16T17:59:16,644 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-0 {event_type=RS_FLUSH_REGIONS, pid=166}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=166 java.io.IOException: Unable to complete flush {ENCODED => 7be8b4dcb0e9f81dbd68149495fe8709, NAME => 'TestAcidGuarantees,,1734371935330.7be8b4dcb0e9f81dbd68149495fe8709.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-16T17:59:16,644 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38367 {}] master.HMaster(4114): Remote procedure failed, pid=166 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 7be8b4dcb0e9f81dbd68149495fe8709, NAME => 'TestAcidGuarantees,,1734371935330.7be8b4dcb0e9f81dbd68149495fe8709.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] 
at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 7be8b4dcb0e9f81dbd68149495fe8709, NAME => 'TestAcidGuarantees,,1734371935330.7be8b4dcb0e9f81dbd68149495fe8709.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-16T17:59:16,679 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38367 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=165 2024-12-16T17:59:16,733 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39733 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7be8b4dcb0e9f81dbd68149495fe8709, server=3609ad07831c,39733,1734371789085 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-16T17:59:16,733 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39733 {}] ipc.CallRunner(138): callId: 134 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:47466 deadline: 1734372016731, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7be8b4dcb0e9f81dbd68149495fe8709, server=3609ad07831c,39733,1734371789085 2024-12-16T17:59:16,733 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39733 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7be8b4dcb0e9f81dbd68149495fe8709, server=3609ad07831c,39733,1734371789085 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-16T17:59:16,733 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39733 {}] ipc.CallRunner(138): callId: 117 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:47488 deadline: 1734372016732, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7be8b4dcb0e9f81dbd68149495fe8709, server=3609ad07831c,39733,1734371789085 2024-12-16T17:59:16,735 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39733 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7be8b4dcb0e9f81dbd68149495fe8709, server=3609ad07831c,39733,1734371789085 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-16T17:59:16,735 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39733 {}] ipc.CallRunner(138): callId: 113 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:47440 deadline: 1734372016733, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7be8b4dcb0e9f81dbd68149495fe8709, server=3609ad07831c,39733,1734371789085 2024-12-16T17:59:16,735 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39733 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7be8b4dcb0e9f81dbd68149495fe8709, server=3609ad07831c,39733,1734371789085 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-16T17:59:16,736 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39733 {}] ipc.CallRunner(138): callId: 111 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:47476 deadline: 1734372016734, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7be8b4dcb0e9f81dbd68149495fe8709, server=3609ad07831c,39733,1734371789085 2024-12-16T17:59:16,736 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39733 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7be8b4dcb0e9f81dbd68149495fe8709, server=3609ad07831c,39733,1734371789085 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-16T17:59:16,736 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39733 {}] ipc.CallRunner(138): callId: 109 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:47450 deadline: 1734372016735, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7be8b4dcb0e9f81dbd68149495fe8709, server=3609ad07831c,39733,1734371789085 2024-12-16T17:59:16,795 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 3609ad07831c,39733,1734371789085 2024-12-16T17:59:16,795 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=39733 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=166 2024-12-16T17:59:16,795 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-1 {event_type=RS_FLUSH_REGIONS, pid=166}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1734371935330.7be8b4dcb0e9f81dbd68149495fe8709. 2024-12-16T17:59:16,795 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-1 {event_type=RS_FLUSH_REGIONS, pid=166}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1734371935330.7be8b4dcb0e9f81dbd68149495fe8709. as already flushing 2024-12-16T17:59:16,795 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-1 {event_type=RS_FLUSH_REGIONS, pid=166}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1734371935330.7be8b4dcb0e9f81dbd68149495fe8709. 2024-12-16T17:59:16,795 ERROR [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-1 {event_type=RS_FLUSH_REGIONS, pid=166}] handler.RSProcedureHandler(58): pid=166 java.io.IOException: Unable to complete flush {ENCODED => 7be8b4dcb0e9f81dbd68149495fe8709, NAME => 'TestAcidGuarantees,,1734371935330.7be8b4dcb0e9f81dbd68149495fe8709.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-16T17:59:16,796 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-1 {event_type=RS_FLUSH_REGIONS, pid=166}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=166 java.io.IOException: Unable to complete flush {ENCODED => 7be8b4dcb0e9f81dbd68149495fe8709, NAME => 'TestAcidGuarantees,,1734371935330.7be8b4dcb0e9f81dbd68149495fe8709.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-16T17:59:16,796 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38367 {}] master.HMaster(4114): Remote procedure failed, pid=166 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 7be8b4dcb0e9f81dbd68149495fe8709, NAME => 'TestAcidGuarantees,,1734371935330.7be8b4dcb0e9f81dbd68149495fe8709.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 7be8b4dcb0e9f81dbd68149495fe8709, NAME => 'TestAcidGuarantees,,1734371935330.7be8b4dcb0e9f81dbd68149495fe8709.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-16T17:59:16,878 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=17.89 KB at sequenceid=204 (bloomFilter=true), to=hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/7be8b4dcb0e9f81dbd68149495fe8709/.tmp/C/436fc035863f4e99834334dc9628efc4 2024-12-16T17:59:16,881 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/7be8b4dcb0e9f81dbd68149495fe8709/.tmp/A/a7a24fea12af419cb4678647f565cef4 as hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/7be8b4dcb0e9f81dbd68149495fe8709/A/a7a24fea12af419cb4678647f565cef4 2024-12-16T17:59:16,886 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/7be8b4dcb0e9f81dbd68149495fe8709/A/a7a24fea12af419cb4678647f565cef4, entries=300, sequenceid=204, filesize=55.7 K 2024-12-16T17:59:16,892 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/7be8b4dcb0e9f81dbd68149495fe8709/.tmp/B/b8851604047145f18936c5800bf36f5c as hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/7be8b4dcb0e9f81dbd68149495fe8709/B/b8851604047145f18936c5800bf36f5c 2024-12-16T17:59:16,898 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/7be8b4dcb0e9f81dbd68149495fe8709/B/b8851604047145f18936c5800bf36f5c, entries=150, 
sequenceid=204, filesize=11.9 K 2024-12-16T17:59:16,899 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/7be8b4dcb0e9f81dbd68149495fe8709/.tmp/C/436fc035863f4e99834334dc9628efc4 as hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/7be8b4dcb0e9f81dbd68149495fe8709/C/436fc035863f4e99834334dc9628efc4 2024-12-16T17:59:16,902 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/7be8b4dcb0e9f81dbd68149495fe8709/C/436fc035863f4e99834334dc9628efc4, entries=150, sequenceid=204, filesize=11.9 K 2024-12-16T17:59:16,902 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~53.67 KB/54960, heapSize ~141.33 KB/144720, currentSize=154.31 KB/158010 for 7be8b4dcb0e9f81dbd68149495fe8709 in 1314ms, sequenceid=204, compaction requested=true 2024-12-16T17:59:16,902 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 7be8b4dcb0e9f81dbd68149495fe8709: 2024-12-16T17:59:16,903 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 7be8b4dcb0e9f81dbd68149495fe8709:A, priority=-2147483648, current under compaction store size is 1 2024-12-16T17:59:16,903 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-16T17:59:16,903 DEBUG [RS:0;3609ad07831c:39733-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 7 store files, 0 compacting, 7 eligible, 16 blocking 2024-12-16T17:59:16,903 DEBUG [RS:0;3609ad07831c:39733-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 7 store files, 0 compacting, 7 eligible, 16 blocking 2024-12-16T17:59:16,903 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 7be8b4dcb0e9f81dbd68149495fe8709:B, priority=-2147483648, current under compaction store size is 2 2024-12-16T17:59:16,903 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-16T17:59:16,903 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 7be8b4dcb0e9f81dbd68149495fe8709:C, priority=-2147483648, current under compaction store size is 3 2024-12-16T17:59:16,903 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-16T17:59:16,904 DEBUG [RS:0;3609ad07831c:39733-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 7 files of size 82569 starting at candidate #0 after considering 15 permutations with 15 in ratio 2024-12-16T17:59:16,905 DEBUG [RS:0;3609ad07831c:39733-longCompactions-0 {}] regionserver.HStore(1540): 7be8b4dcb0e9f81dbd68149495fe8709/B is initiating minor compaction (all files) 2024-12-16T17:59:16,905 INFO [RS:0;3609ad07831c:39733-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 7be8b4dcb0e9f81dbd68149495fe8709/B in 
TestAcidGuarantees,,1734371935330.7be8b4dcb0e9f81dbd68149495fe8709. 2024-12-16T17:59:16,905 INFO [RS:0;3609ad07831c:39733-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/7be8b4dcb0e9f81dbd68149495fe8709/B/dff7b39b0e544f9cac04220be632a910, hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/7be8b4dcb0e9f81dbd68149495fe8709/B/52ea84c84bd64ffc9b1a875bcee0eccc, hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/7be8b4dcb0e9f81dbd68149495fe8709/B/2880418b33c34a6c8b1c061dcfaec1c4, hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/7be8b4dcb0e9f81dbd68149495fe8709/B/0ade4a4441da4359a8717f3bdc0b0236, hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/7be8b4dcb0e9f81dbd68149495fe8709/B/c139640199c142b280fe69e74f662024, hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/7be8b4dcb0e9f81dbd68149495fe8709/B/b904dccbea474fa080b7b585ad011a0e, hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/7be8b4dcb0e9f81dbd68149495fe8709/B/b8851604047145f18936c5800bf36f5c] into tmpdir=hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/7be8b4dcb0e9f81dbd68149495fe8709/.tmp, totalSize=80.6 K 2024-12-16T17:59:16,905 DEBUG [RS:0;3609ad07831c:39733-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 7 files of size 234925 starting at candidate #0 after considering 15 permutations with 15 in ratio 2024-12-16T17:59:16,905 DEBUG [RS:0;3609ad07831c:39733-shortCompactions-0 {}] regionserver.HStore(1540): 7be8b4dcb0e9f81dbd68149495fe8709/A is initiating minor compaction (all files) 2024-12-16T17:59:16,905 INFO [RS:0;3609ad07831c:39733-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 7be8b4dcb0e9f81dbd68149495fe8709/A in TestAcidGuarantees,,1734371935330.7be8b4dcb0e9f81dbd68149495fe8709. 
2024-12-16T17:59:16,905 INFO [RS:0;3609ad07831c:39733-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/7be8b4dcb0e9f81dbd68149495fe8709/A/32506f7da57d471d8c2c14c2ed856acc, hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/7be8b4dcb0e9f81dbd68149495fe8709/A/654d1d03ff3d4462b2e814e1350d907f, hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/7be8b4dcb0e9f81dbd68149495fe8709/A/e263399596344eb485b3d79af5977c1b, hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/7be8b4dcb0e9f81dbd68149495fe8709/A/fa7cf489b14244e5ad54681da5c328cb, hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/7be8b4dcb0e9f81dbd68149495fe8709/A/67cac75738674822b7f260b132d6d124, hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/7be8b4dcb0e9f81dbd68149495fe8709/A/f76cf7a2aa7d492b891eb9a1e305f31b, hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/7be8b4dcb0e9f81dbd68149495fe8709/A/a7a24fea12af419cb4678647f565cef4] into tmpdir=hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/7be8b4dcb0e9f81dbd68149495fe8709/.tmp, totalSize=229.4 K 2024-12-16T17:59:16,905 INFO [RS:0;3609ad07831c:39733-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(181): MOB compaction: major=false isAll=true priority=9 throughput controller=DefaultCompactionThroughputController [maxThroughput=50.00 MB/second, activeCompactions=0] table=TestAcidGuarantees cf=A region=TestAcidGuarantees,,1734371935330.7be8b4dcb0e9f81dbd68149495fe8709. 2024-12-16T17:59:16,905 DEBUG [RS:0;3609ad07831c:39733-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(191): MOB compaction table=TestAcidGuarantees cf=A region=TestAcidGuarantees,,1734371935330.7be8b4dcb0e9f81dbd68149495fe8709. 
files: [hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/7be8b4dcb0e9f81dbd68149495fe8709/A/32506f7da57d471d8c2c14c2ed856acc, hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/7be8b4dcb0e9f81dbd68149495fe8709/A/654d1d03ff3d4462b2e814e1350d907f, hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/7be8b4dcb0e9f81dbd68149495fe8709/A/e263399596344eb485b3d79af5977c1b, hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/7be8b4dcb0e9f81dbd68149495fe8709/A/fa7cf489b14244e5ad54681da5c328cb, hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/7be8b4dcb0e9f81dbd68149495fe8709/A/67cac75738674822b7f260b132d6d124, hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/7be8b4dcb0e9f81dbd68149495fe8709/A/f76cf7a2aa7d492b891eb9a1e305f31b, hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/7be8b4dcb0e9f81dbd68149495fe8709/A/a7a24fea12af419cb4678647f565cef4] 2024-12-16T17:59:16,906 DEBUG [RS:0;3609ad07831c:39733-longCompactions-0 {}] compactions.Compactor(224): Compacting dff7b39b0e544f9cac04220be632a910, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=99, earliestPutTs=1734371944022 2024-12-16T17:59:16,906 DEBUG [RS:0;3609ad07831c:39733-shortCompactions-0 {}] compactions.Compactor(224): Compacting 32506f7da57d471d8c2c14c2ed856acc, keycount=150, bloomtype=ROW, size=30.4 K, encoding=NONE, compression=NONE, seqNum=99, earliestPutTs=1734371944022 2024-12-16T17:59:16,906 DEBUG [RS:0;3609ad07831c:39733-longCompactions-0 {}] compactions.Compactor(224): Compacting 52ea84c84bd64ffc9b1a875bcee0eccc, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=119, earliestPutTs=1734371945143 2024-12-16T17:59:16,906 DEBUG [RS:0;3609ad07831c:39733-shortCompactions-0 {}] compactions.Compactor(224): Compacting 654d1d03ff3d4462b2e814e1350d907f, keycount=150, bloomtype=ROW, size=30.2 K, encoding=NONE, compression=NONE, seqNum=119, earliestPutTs=1734371945143 2024-12-16T17:59:16,906 DEBUG [RS:0;3609ad07831c:39733-longCompactions-0 {}] compactions.Compactor(224): Compacting 2880418b33c34a6c8b1c061dcfaec1c4, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=139, earliestPutTs=1734371947269 2024-12-16T17:59:16,906 DEBUG [RS:0;3609ad07831c:39733-shortCompactions-0 {}] compactions.Compactor(224): Compacting e263399596344eb485b3d79af5977c1b, keycount=150, bloomtype=ROW, size=30.4 K, encoding=NONE, compression=NONE, seqNum=139, earliestPutTs=1734371947269 2024-12-16T17:59:16,907 DEBUG [RS:0;3609ad07831c:39733-longCompactions-0 {}] compactions.Compactor(224): Compacting 0ade4a4441da4359a8717f3bdc0b0236, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=157, earliestPutTs=1734371949295 2024-12-16T17:59:16,907 DEBUG [RS:0;3609ad07831c:39733-shortCompactions-0 {}] compactions.Compactor(224): Compacting fa7cf489b14244e5ad54681da5c328cb, keycount=150, bloomtype=ROW, size=30.4 K, encoding=NONE, compression=NONE, seqNum=157, earliestPutTs=1734371949295 2024-12-16T17:59:16,907 DEBUG [RS:0;3609ad07831c:39733-shortCompactions-0 {}] compactions.Compactor(224): Compacting 
67cac75738674822b7f260b132d6d124, keycount=150, bloomtype=ROW, size=30.4 K, encoding=NONE, compression=NONE, seqNum=175, earliestPutTs=1734371951414 2024-12-16T17:59:16,907 DEBUG [RS:0;3609ad07831c:39733-longCompactions-0 {}] compactions.Compactor(224): Compacting c139640199c142b280fe69e74f662024, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=175, earliestPutTs=1734371951414 2024-12-16T17:59:16,907 DEBUG [RS:0;3609ad07831c:39733-shortCompactions-0 {}] compactions.Compactor(224): Compacting f76cf7a2aa7d492b891eb9a1e305f31b, keycount=100, bloomtype=ROW, size=21.9 K, encoding=NONE, compression=NONE, seqNum=193, earliestPutTs=1734371953470 2024-12-16T17:59:16,907 DEBUG [RS:0;3609ad07831c:39733-longCompactions-0 {}] compactions.Compactor(224): Compacting b904dccbea474fa080b7b585ad011a0e, keycount=100, bloomtype=ROW, size=9.5 K, encoding=NONE, compression=NONE, seqNum=193, earliestPutTs=1734371953470 2024-12-16T17:59:16,908 DEBUG [RS:0;3609ad07831c:39733-shortCompactions-0 {}] compactions.Compactor(224): Compacting a7a24fea12af419cb4678647f565cef4, keycount=300, bloomtype=ROW, size=55.7 K, encoding=NONE, compression=NONE, seqNum=204, earliestPutTs=1734371955559 2024-12-16T17:59:16,908 DEBUG [RS:0;3609ad07831c:39733-longCompactions-0 {}] compactions.Compactor(224): Compacting b8851604047145f18936c5800bf36f5c, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=204, earliestPutTs=1734371955585 2024-12-16T17:59:16,918 INFO [RS:0;3609ad07831c:39733-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(322): Compact MOB=false optimized configured=false optimized enabled=false maximum MOB file size=1073741824 major=true store=[table=TestAcidGuarantees family=A region=7be8b4dcb0e9f81dbd68149495fe8709] 2024-12-16T17:59:16,920 INFO [RS:0;3609ad07831c:39733-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 7be8b4dcb0e9f81dbd68149495fe8709#B#compaction#510 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 
1 active operations remaining, total limit is 50.00 MB/second 2024-12-16T17:59:16,920 DEBUG [RS:0;3609ad07831c:39733-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/7be8b4dcb0e9f81dbd68149495fe8709/.tmp/B/7b24d75874e043dc870454ae65f1e857 is 50, key is test_row_0/B:col10/1734371955585/Put/seqid=0 2024-12-16T17:59:16,921 DEBUG [RS:0;3609ad07831c:39733-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(626): New MOB writer created=d41d8cd98f00b204e9800998ecf8427e202412163f359f4aad8245d8a655cacd215f12ef_7be8b4dcb0e9f81dbd68149495fe8709 store=[table=TestAcidGuarantees family=A region=7be8b4dcb0e9f81dbd68149495fe8709] 2024-12-16T17:59:16,923 DEBUG [RS:0;3609ad07831c:39733-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(647): Commit or abort size=0 mobCells=0 major=true file=d41d8cd98f00b204e9800998ecf8427e202412163f359f4aad8245d8a655cacd215f12ef_7be8b4dcb0e9f81dbd68149495fe8709, store=[table=TestAcidGuarantees family=A region=7be8b4dcb0e9f81dbd68149495fe8709] 2024-12-16T17:59:16,923 DEBUG [RS:0;3609ad07831c:39733-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(658): Aborting writer for hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202412163f359f4aad8245d8a655cacd215f12ef_7be8b4dcb0e9f81dbd68149495fe8709 because there are no MOB cells, store=[table=TestAcidGuarantees family=A region=7be8b4dcb0e9f81dbd68149495fe8709] 2024-12-16T17:59:16,932 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41817 is added to blk_1073742425_1601 (size=12595) 2024-12-16T17:59:16,938 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41817 is added to blk_1073742426_1602 (size=4469) 2024-12-16T17:59:16,947 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 3609ad07831c,39733,1734371789085 2024-12-16T17:59:16,947 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=39733 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=166 2024-12-16T17:59:16,947 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-2 {event_type=RS_FLUSH_REGIONS, pid=166}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1734371935330.7be8b4dcb0e9f81dbd68149495fe8709. 
2024-12-16T17:59:16,948 INFO [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-2 {event_type=RS_FLUSH_REGIONS, pid=166}] regionserver.HRegion(2837): Flushing 7be8b4dcb0e9f81dbd68149495fe8709 3/3 column families, dataSize=154.31 KB heapSize=405.05 KB 2024-12-16T17:59:16,948 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-2 {event_type=RS_FLUSH_REGIONS, pid=166}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 7be8b4dcb0e9f81dbd68149495fe8709, store=A 2024-12-16T17:59:16,948 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-2 {event_type=RS_FLUSH_REGIONS, pid=166}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-16T17:59:16,948 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-2 {event_type=RS_FLUSH_REGIONS, pid=166}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 7be8b4dcb0e9f81dbd68149495fe8709, store=B 2024-12-16T17:59:16,948 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-2 {event_type=RS_FLUSH_REGIONS, pid=166}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-16T17:59:16,948 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-2 {event_type=RS_FLUSH_REGIONS, pid=166}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 7be8b4dcb0e9f81dbd68149495fe8709, store=C 2024-12-16T17:59:16,948 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-2 {event_type=RS_FLUSH_REGIONS, pid=166}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-16T17:59:16,954 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-2 {event_type=RS_FLUSH_REGIONS, pid=166}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e2024121696f70ed75bdb4240bd3f38e715749901_7be8b4dcb0e9f81dbd68149495fe8709 is 50, key is test_row_0/A:col10/1734371955618/Put/seqid=0 2024-12-16T17:59:16,961 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41817 is added to blk_1073742427_1603 (size=12304) 2024-12-16T17:59:16,962 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-2 {event_type=RS_FLUSH_REGIONS, pid=166}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-16T17:59:16,964 INFO [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-2 {event_type=RS_FLUSH_REGIONS, pid=166}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e2024121696f70ed75bdb4240bd3f38e715749901_7be8b4dcb0e9f81dbd68149495fe8709 to hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e2024121696f70ed75bdb4240bd3f38e715749901_7be8b4dcb0e9f81dbd68149495fe8709 2024-12-16T17:59:16,965 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-2 {event_type=RS_FLUSH_REGIONS, pid=166}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/7be8b4dcb0e9f81dbd68149495fe8709/.tmp/A/a3c130e384034f09a0c6975b2ce836aa, store: [table=TestAcidGuarantees family=A region=7be8b4dcb0e9f81dbd68149495fe8709] 2024-12-16T17:59:16,965 
DEBUG [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-2 {event_type=RS_FLUSH_REGIONS, pid=166}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/7be8b4dcb0e9f81dbd68149495fe8709/.tmp/A/a3c130e384034f09a0c6975b2ce836aa is 175, key is test_row_0/A:col10/1734371955618/Put/seqid=0 2024-12-16T17:59:16,968 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41817 is added to blk_1073742428_1604 (size=31105) 2024-12-16T17:59:17,335 DEBUG [RS:0;3609ad07831c:39733-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/7be8b4dcb0e9f81dbd68149495fe8709/.tmp/B/7b24d75874e043dc870454ae65f1e857 as hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/7be8b4dcb0e9f81dbd68149495fe8709/B/7b24d75874e043dc870454ae65f1e857 2024-12-16T17:59:17,339 INFO [RS:0;3609ad07831c:39733-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 7 (all) file(s) in 7be8b4dcb0e9f81dbd68149495fe8709/B of 7be8b4dcb0e9f81dbd68149495fe8709 into 7b24d75874e043dc870454ae65f1e857(size=12.3 K), total size for store is 12.3 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-12-16T17:59:17,339 DEBUG [RS:0;3609ad07831c:39733-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 7be8b4dcb0e9f81dbd68149495fe8709: 2024-12-16T17:59:17,339 INFO [RS:0;3609ad07831c:39733-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1734371935330.7be8b4dcb0e9f81dbd68149495fe8709., storeName=7be8b4dcb0e9f81dbd68149495fe8709/B, priority=9, startTime=1734371956903; duration=0sec 2024-12-16T17:59:17,339 DEBUG [RS:0;3609ad07831c:39733-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-16T17:59:17,339 DEBUG [RS:0;3609ad07831c:39733-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 7be8b4dcb0e9f81dbd68149495fe8709:B 2024-12-16T17:59:17,339 DEBUG [RS:0;3609ad07831c:39733-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 7 store files, 0 compacting, 7 eligible, 16 blocking 2024-12-16T17:59:17,340 DEBUG [RS:0;3609ad07831c:39733-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 7 files of size 82569 starting at candidate #0 after considering 15 permutations with 15 in ratio 2024-12-16T17:59:17,341 DEBUG [RS:0;3609ad07831c:39733-longCompactions-0 {}] regionserver.HStore(1540): 7be8b4dcb0e9f81dbd68149495fe8709/C is initiating minor compaction (all files) 2024-12-16T17:59:17,341 INFO [RS:0;3609ad07831c:39733-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 7be8b4dcb0e9f81dbd68149495fe8709/C in TestAcidGuarantees,,1734371935330.7be8b4dcb0e9f81dbd68149495fe8709. 
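The SortedCompactionPolicy/ExploringCompactionPolicy lines above ("7 store files, 0 compacting, 7 eligible, 16 blocking"; 7 files selected) are driven by the per-store compaction selection knobs. A minimal sketch, assuming the stock configuration keys; the values shown are the usual defaults for illustration, not necessarily what this test run used:

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;

public class CompactionSelectionSketch {
  public static void main(String[] args) {
    Configuration conf = HBaseConfiguration.create();
    // "eligible" files are weighed against these bounds when a compaction is selected
    conf.setInt("hbase.hstore.compaction.min", 3);       // need at least this many files to compact
    conf.setInt("hbase.hstore.compaction.max", 10);      // never compact more than this many at once
    // "16 blocking" in the log: writes to the store stall once it holds this many files
    conf.setInt("hbase.hstore.blockingStoreFiles", 16);
    System.out.println("compaction.min=" + conf.getInt("hbase.hstore.compaction.min", -1));
  }
}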
2024-12-16T17:59:17,341 INFO [RS:0;3609ad07831c:39733-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/7be8b4dcb0e9f81dbd68149495fe8709/C/6437aec0250e447d9b2892637d6adec7, hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/7be8b4dcb0e9f81dbd68149495fe8709/C/e6396044c5b04173879b029440bc8fc0, hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/7be8b4dcb0e9f81dbd68149495fe8709/C/b50d9eb0b1c748a597c3f1076057e435, hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/7be8b4dcb0e9f81dbd68149495fe8709/C/a5a68d4ce2294ce2a90aea89376c0da8, hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/7be8b4dcb0e9f81dbd68149495fe8709/C/6c589bc5aa3b4632a21b38ec1f59eb14, hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/7be8b4dcb0e9f81dbd68149495fe8709/C/1c0ea7523e2b471fa7e5d4802e1cba8e, hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/7be8b4dcb0e9f81dbd68149495fe8709/C/436fc035863f4e99834334dc9628efc4] into tmpdir=hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/7be8b4dcb0e9f81dbd68149495fe8709/.tmp, totalSize=80.6 K 2024-12-16T17:59:17,341 INFO [RS:0;3609ad07831c:39733-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 7be8b4dcb0e9f81dbd68149495fe8709#A#compaction#511 average throughput is 0.06 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-12-16T17:59:17,341 DEBUG [RS:0;3609ad07831c:39733-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/7be8b4dcb0e9f81dbd68149495fe8709/.tmp/A/078759a43c214e47bc29467cc61c8045 is 175, key is test_row_0/A:col10/1734371955585/Put/seqid=0 2024-12-16T17:59:17,342 DEBUG [RS:0;3609ad07831c:39733-longCompactions-0 {}] compactions.Compactor(224): Compacting 6437aec0250e447d9b2892637d6adec7, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=99, earliestPutTs=1734371944022 2024-12-16T17:59:17,342 DEBUG [RS:0;3609ad07831c:39733-longCompactions-0 {}] compactions.Compactor(224): Compacting e6396044c5b04173879b029440bc8fc0, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=119, earliestPutTs=1734371945143 2024-12-16T17:59:17,343 DEBUG [RS:0;3609ad07831c:39733-longCompactions-0 {}] compactions.Compactor(224): Compacting b50d9eb0b1c748a597c3f1076057e435, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=139, earliestPutTs=1734371947269 2024-12-16T17:59:17,343 DEBUG [RS:0;3609ad07831c:39733-longCompactions-0 {}] compactions.Compactor(224): Compacting a5a68d4ce2294ce2a90aea89376c0da8, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=157, earliestPutTs=1734371949295 2024-12-16T17:59:17,343 DEBUG [RS:0;3609ad07831c:39733-longCompactions-0 {}] compactions.Compactor(224): Compacting 6c589bc5aa3b4632a21b38ec1f59eb14, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=175, earliestPutTs=1734371951414 2024-12-16T17:59:17,343 DEBUG [RS:0;3609ad07831c:39733-longCompactions-0 {}] compactions.Compactor(224): Compacting 1c0ea7523e2b471fa7e5d4802e1cba8e, keycount=100, bloomtype=ROW, size=9.5 K, encoding=NONE, compression=NONE, seqNum=193, earliestPutTs=1734371953470 2024-12-16T17:59:17,343 DEBUG [RS:0;3609ad07831c:39733-longCompactions-0 {}] compactions.Compactor(224): Compacting 436fc035863f4e99834334dc9628efc4, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=204, earliestPutTs=1734371955585 2024-12-16T17:59:17,345 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41817 is added to blk_1073742429_1605 (size=31549) 2024-12-16T17:59:17,349 DEBUG [RS:0;3609ad07831c:39733-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/7be8b4dcb0e9f81dbd68149495fe8709/.tmp/A/078759a43c214e47bc29467cc61c8045 as hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/7be8b4dcb0e9f81dbd68149495fe8709/A/078759a43c214e47bc29467cc61c8045 2024-12-16T17:59:17,353 INFO [RS:0;3609ad07831c:39733-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 7 (all) file(s) in 7be8b4dcb0e9f81dbd68149495fe8709/A of 7be8b4dcb0e9f81dbd68149495fe8709 into 078759a43c214e47bc29467cc61c8045(size=30.8 K), total size for store is 30.8 K. This selection was in queue for 0sec, and took 0sec to execute. 
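The PressureAwareThroughputController messages ("average throughput is ..., total limit is 50.00 MB/second") show compaction writes being rate-limited on the region server. A hedged sketch of the bounds that limit is scaled between; the key names below are the ones I believe the pressure-aware controller reads, so verify them against the HBase release in use, and the byte values are illustrative:

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;

public class CompactionThroughputSketch {
  public static void main(String[] args) {
    Configuration conf = HBaseConfiguration.create();
    // Assumed key names (check your release): the controller scales the per-server compaction
    // write limit between these two bounds depending on store-file pressure.
    conf.setLong("hbase.hstore.compaction.throughput.lower.bound", 50L * 1024 * 1024);   // 50 MB/s
    conf.setLong("hbase.hstore.compaction.throughput.higher.bound", 100L * 1024 * 1024); // 100 MB/s
    System.out.println("compaction throughput bounds configured (sketch only)");
  }
}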
2024-12-16T17:59:17,353 DEBUG [RS:0;3609ad07831c:39733-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 7be8b4dcb0e9f81dbd68149495fe8709: 2024-12-16T17:59:17,353 INFO [RS:0;3609ad07831c:39733-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1734371935330.7be8b4dcb0e9f81dbd68149495fe8709., storeName=7be8b4dcb0e9f81dbd68149495fe8709/A, priority=9, startTime=1734371956903; duration=0sec 2024-12-16T17:59:17,353 DEBUG [RS:0;3609ad07831c:39733-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-16T17:59:17,353 DEBUG [RS:0;3609ad07831c:39733-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 7be8b4dcb0e9f81dbd68149495fe8709:A 2024-12-16T17:59:17,355 INFO [RS:0;3609ad07831c:39733-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 7be8b4dcb0e9f81dbd68149495fe8709#C#compaction#513 average throughput is 2.18 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-12-16T17:59:17,355 DEBUG [RS:0;3609ad07831c:39733-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/7be8b4dcb0e9f81dbd68149495fe8709/.tmp/C/04890b459c2342ad9ec3711d6e338e4f is 50, key is test_row_0/C:col10/1734371955585/Put/seqid=0 2024-12-16T17:59:17,361 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41817 is added to blk_1073742430_1606 (size=12595) 2024-12-16T17:59:17,368 INFO [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-2 {event_type=RS_FLUSH_REGIONS, pid=166}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=230, memsize=51.4 K, hasBloomFilter=true, into tmp file hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/7be8b4dcb0e9f81dbd68149495fe8709/.tmp/A/a3c130e384034f09a0c6975b2ce836aa 2024-12-16T17:59:17,374 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-2 {event_type=RS_FLUSH_REGIONS, pid=166}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/7be8b4dcb0e9f81dbd68149495fe8709/.tmp/B/e013c3af51cb4046aa04f3a06e9903bd is 50, key is test_row_0/B:col10/1734371955618/Put/seqid=0 2024-12-16T17:59:17,377 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41817 is added to blk_1073742431_1607 (size=12151) 2024-12-16T17:59:17,679 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38367 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=165 2024-12-16T17:59:17,739 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39733 {}] regionserver.HRegion(8581): Flush requested on 7be8b4dcb0e9f81dbd68149495fe8709 2024-12-16T17:59:17,739 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1734371935330.7be8b4dcb0e9f81dbd68149495fe8709. as already flushing 2024-12-16T17:59:17,745 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39733 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7be8b4dcb0e9f81dbd68149495fe8709, server=3609ad07831c,39733,1734371789085 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-16T17:59:17,746 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39733 {}] ipc.CallRunner(138): callId: 122 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:47488 deadline: 1734372017743, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7be8b4dcb0e9f81dbd68149495fe8709, server=3609ad07831c,39733,1734371789085 2024-12-16T17:59:17,747 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39733 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7be8b4dcb0e9f81dbd68149495fe8709, server=3609ad07831c,39733,1734371789085 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-16T17:59:17,747 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39733 {}] ipc.CallRunner(138): callId: 113 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:47450 deadline: 1734372017745, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7be8b4dcb0e9f81dbd68149495fe8709, server=3609ad07831c,39733,1734371789085 2024-12-16T17:59:17,748 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39733 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7be8b4dcb0e9f81dbd68149495fe8709, server=3609ad07831c,39733,1734371789085 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-16T17:59:17,749 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39733 {}] ipc.CallRunner(138): callId: 115 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:47476 deadline: 1734372017746, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7be8b4dcb0e9f81dbd68149495fe8709, server=3609ad07831c,39733,1734371789085 2024-12-16T17:59:17,749 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39733 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7be8b4dcb0e9f81dbd68149495fe8709, server=3609ad07831c,39733,1734371789085 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-16T17:59:17,749 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39733 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7be8b4dcb0e9f81dbd68149495fe8709, server=3609ad07831c,39733,1734371789085 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-16T17:59:17,749 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39733 {}] ipc.CallRunner(138): callId: 137 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:47466 deadline: 1734372017746, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7be8b4dcb0e9f81dbd68149495fe8709, server=3609ad07831c,39733,1734371789085 2024-12-16T17:59:17,749 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39733 {}] ipc.CallRunner(138): callId: 116 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:47440 deadline: 1734372017746, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7be8b4dcb0e9f81dbd68149495fe8709, server=3609ad07831c,39733,1734371789085 2024-12-16T17:59:17,765 DEBUG [RS:0;3609ad07831c:39733-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/7be8b4dcb0e9f81dbd68149495fe8709/.tmp/C/04890b459c2342ad9ec3711d6e338e4f as hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/7be8b4dcb0e9f81dbd68149495fe8709/C/04890b459c2342ad9ec3711d6e338e4f 2024-12-16T17:59:17,769 INFO [RS:0;3609ad07831c:39733-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 7 (all) file(s) in 7be8b4dcb0e9f81dbd68149495fe8709/C of 7be8b4dcb0e9f81dbd68149495fe8709 into 04890b459c2342ad9ec3711d6e338e4f(size=12.3 K), total size for store is 12.3 K. This selection was in queue for 0sec, and took 0sec to execute. 
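The repeated RegionTooBusyException warnings above ("Over memstore limit=512.0 K") are the region server pushing back on writers while the flush catches up; the HBase client normally retries such failures internally and only surfaces an IOException to the application once its own retries are exhausted. A minimal client-side sketch, assuming the table, family, and row names from the log and an invented cell value, that backs off and retries when that happens:

import java.io.IOException;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.util.Bytes;

public class BackoffPutSketch {
  public static void main(String[] args) throws Exception {
    try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create());
         Table table = conn.getTable(TableName.valueOf("TestAcidGuarantees"))) {
      Put put = new Put(Bytes.toBytes("test_row_0"))
          .addColumn(Bytes.toBytes("A"), Bytes.toBytes("col10"), Bytes.toBytes("value"));
      long backoffMs = 100;
      for (int attempt = 0; attempt < 5; attempt++) {
        try {
          table.put(put);
          break;                     // write accepted
        } catch (IOException e) {
          // Once the client's retries are exhausted, the busy-region error from the log
          // surfaces here (possibly wrapped); back off and let the flush drain the memstore.
          Thread.sleep(backoffMs);
          backoffMs *= 2;
        }
      }
    }
  }
}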
2024-12-16T17:59:17,769 DEBUG [RS:0;3609ad07831c:39733-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 7be8b4dcb0e9f81dbd68149495fe8709: 2024-12-16T17:59:17,769 INFO [RS:0;3609ad07831c:39733-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1734371935330.7be8b4dcb0e9f81dbd68149495fe8709., storeName=7be8b4dcb0e9f81dbd68149495fe8709/C, priority=9, startTime=1734371956903; duration=0sec 2024-12-16T17:59:17,769 DEBUG [RS:0;3609ad07831c:39733-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-16T17:59:17,769 DEBUG [RS:0;3609ad07831c:39733-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 7be8b4dcb0e9f81dbd68149495fe8709:C 2024-12-16T17:59:17,777 INFO [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-2 {event_type=RS_FLUSH_REGIONS, pid=166}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=51.44 KB at sequenceid=230 (bloomFilter=true), to=hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/7be8b4dcb0e9f81dbd68149495fe8709/.tmp/B/e013c3af51cb4046aa04f3a06e9903bd 2024-12-16T17:59:17,782 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-2 {event_type=RS_FLUSH_REGIONS, pid=166}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/7be8b4dcb0e9f81dbd68149495fe8709/.tmp/C/3b5297844056490097b7493934d908b9 is 50, key is test_row_0/C:col10/1734371955618/Put/seqid=0 2024-12-16T17:59:17,785 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41817 is added to blk_1073742432_1608 (size=12151) 2024-12-16T17:59:17,786 INFO [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-2 {event_type=RS_FLUSH_REGIONS, pid=166}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=51.44 KB at sequenceid=230 (bloomFilter=true), to=hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/7be8b4dcb0e9f81dbd68149495fe8709/.tmp/C/3b5297844056490097b7493934d908b9 2024-12-16T17:59:17,789 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-2 {event_type=RS_FLUSH_REGIONS, pid=166}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/7be8b4dcb0e9f81dbd68149495fe8709/.tmp/A/a3c130e384034f09a0c6975b2ce836aa as hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/7be8b4dcb0e9f81dbd68149495fe8709/A/a3c130e384034f09a0c6975b2ce836aa 2024-12-16T17:59:17,792 INFO [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-2 {event_type=RS_FLUSH_REGIONS, pid=166}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/7be8b4dcb0e9f81dbd68149495fe8709/A/a3c130e384034f09a0c6975b2ce836aa, entries=150, sequenceid=230, filesize=30.4 K 2024-12-16T17:59:17,793 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-2 {event_type=RS_FLUSH_REGIONS, pid=166}] regionserver.HRegionFileSystem(442): Committing 
hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/7be8b4dcb0e9f81dbd68149495fe8709/.tmp/B/e013c3af51cb4046aa04f3a06e9903bd as hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/7be8b4dcb0e9f81dbd68149495fe8709/B/e013c3af51cb4046aa04f3a06e9903bd 2024-12-16T17:59:17,796 INFO [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-2 {event_type=RS_FLUSH_REGIONS, pid=166}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/7be8b4dcb0e9f81dbd68149495fe8709/B/e013c3af51cb4046aa04f3a06e9903bd, entries=150, sequenceid=230, filesize=11.9 K 2024-12-16T17:59:17,797 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-2 {event_type=RS_FLUSH_REGIONS, pid=166}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/7be8b4dcb0e9f81dbd68149495fe8709/.tmp/C/3b5297844056490097b7493934d908b9 as hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/7be8b4dcb0e9f81dbd68149495fe8709/C/3b5297844056490097b7493934d908b9 2024-12-16T17:59:17,799 INFO [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-2 {event_type=RS_FLUSH_REGIONS, pid=166}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/7be8b4dcb0e9f81dbd68149495fe8709/C/3b5297844056490097b7493934d908b9, entries=150, sequenceid=230, filesize=11.9 K 2024-12-16T17:59:17,800 INFO [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-2 {event_type=RS_FLUSH_REGIONS, pid=166}] regionserver.HRegion(3040): Finished flush of dataSize ~154.31 KB/158010, heapSize ~405 KB/414720, currentSize=60.38 KB/61830 for 7be8b4dcb0e9f81dbd68149495fe8709 in 852ms, sequenceid=230, compaction requested=false 2024-12-16T17:59:17,800 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-2 {event_type=RS_FLUSH_REGIONS, pid=166}] regionserver.HRegion(2538): Flush status journal for 7be8b4dcb0e9f81dbd68149495fe8709: 2024-12-16T17:59:17,800 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-2 {event_type=RS_FLUSH_REGIONS, pid=166}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1734371935330.7be8b4dcb0e9f81dbd68149495fe8709. 
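pid=166 (FlushRegionProcedure) and its parent pid=165 (FlushTableProcedure) complete just below. A table-wide flush like that can be requested from a client through the Admin API; a minimal sketch using the table name from the log:

import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

public class FlushTableSketch {
  public static void main(String[] args) throws Exception {
    try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create());
         Admin admin = conn.getAdmin()) {
      // Asks the master to run a table flush procedure (like pid=165 above); the master then
      // dispatches per-region flush work (like pid=166) to the hosting region server.
      admin.flush(TableName.valueOf("TestAcidGuarantees"));
    }
  }
}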
2024-12-16T17:59:17,800 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3609ad07831c:0-2 {event_type=RS_FLUSH_REGIONS, pid=166}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=166 2024-12-16T17:59:17,800 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38367 {}] master.HMaster(4106): Remote procedure done, pid=166 2024-12-16T17:59:17,802 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=166, resume processing ppid=165 2024-12-16T17:59:17,802 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=166, ppid=165, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 2.2250 sec 2024-12-16T17:59:17,803 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=165, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=165, table=TestAcidGuarantees in 2.2290 sec 2024-12-16T17:59:17,848 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39733 {}] regionserver.HRegion(8581): Flush requested on 7be8b4dcb0e9f81dbd68149495fe8709 2024-12-16T17:59:17,848 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 7be8b4dcb0e9f81dbd68149495fe8709 3/3 column families, dataSize=67.09 KB heapSize=176.53 KB 2024-12-16T17:59:17,848 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 7be8b4dcb0e9f81dbd68149495fe8709, store=A 2024-12-16T17:59:17,849 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-16T17:59:17,849 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 7be8b4dcb0e9f81dbd68149495fe8709, store=B 2024-12-16T17:59:17,849 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-16T17:59:17,849 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 7be8b4dcb0e9f81dbd68149495fe8709, store=C 2024-12-16T17:59:17,849 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-16T17:59:17,862 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39733 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7be8b4dcb0e9f81dbd68149495fe8709, server=3609ad07831c,39733,1734371789085 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-16T17:59:17,863 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39733 {}] ipc.CallRunner(138): callId: 129 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:47488 deadline: 1734372017859, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7be8b4dcb0e9f81dbd68149495fe8709, server=3609ad07831c,39733,1734371789085 2024-12-16T17:59:17,866 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39733 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7be8b4dcb0e9f81dbd68149495fe8709, server=3609ad07831c,39733,1734371789085 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-16T17:59:17,866 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39733 {}] ipc.CallRunner(138): callId: 143 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:47466 deadline: 1734372017861, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7be8b4dcb0e9f81dbd68149495fe8709, server=3609ad07831c,39733,1734371789085 2024-12-16T17:59:17,866 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39733 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7be8b4dcb0e9f81dbd68149495fe8709, server=3609ad07831c,39733,1734371789085 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-16T17:59:17,866 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39733 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7be8b4dcb0e9f81dbd68149495fe8709, server=3609ad07831c,39733,1734371789085 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-16T17:59:17,866 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39733 {}] ipc.CallRunner(138): callId: 121 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:47440 deadline: 1734372017862, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7be8b4dcb0e9f81dbd68149495fe8709, server=3609ad07831c,39733,1734371789085 2024-12-16T17:59:17,866 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39733 {}] ipc.CallRunner(138): callId: 121 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:47476 deadline: 1734372017862, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7be8b4dcb0e9f81dbd68149495fe8709, server=3609ad07831c,39733,1734371789085 2024-12-16T17:59:17,866 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39733 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7be8b4dcb0e9f81dbd68149495fe8709, server=3609ad07831c,39733,1734371789085 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-16T17:59:17,866 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39733 {}] ipc.CallRunner(138): callId: 120 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:47450 deadline: 1734372017863, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7be8b4dcb0e9f81dbd68149495fe8709, server=3609ad07831c,39733,1734371789085 2024-12-16T17:59:17,875 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241216cbb1a7fb6ebd487eaff1e3f1351cf828_7be8b4dcb0e9f81dbd68149495fe8709 is 50, key is test_row_0/A:col10/1734371957742/Put/seqid=0 2024-12-16T17:59:17,877 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41817 is added to blk_1073742433_1609 (size=12304) 2024-12-16T17:59:17,878 DEBUG [MemStoreFlusher.0 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-16T17:59:17,880 INFO [MemStoreFlusher.0 {}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241216cbb1a7fb6ebd487eaff1e3f1351cf828_7be8b4dcb0e9f81dbd68149495fe8709 to hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241216cbb1a7fb6ebd487eaff1e3f1351cf828_7be8b4dcb0e9f81dbd68149495fe8709 2024-12-16T17:59:17,881 DEBUG [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/7be8b4dcb0e9f81dbd68149495fe8709/.tmp/A/1b5c27f80bc64cf0a27fdbfdfa56dce4, store: [table=TestAcidGuarantees family=A region=7be8b4dcb0e9f81dbd68149495fe8709] 2024-12-16T17:59:17,881 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in 
hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/7be8b4dcb0e9f81dbd68149495fe8709/.tmp/A/1b5c27f80bc64cf0a27fdbfdfa56dce4 is 175, key is test_row_0/A:col10/1734371957742/Put/seqid=0 2024-12-16T17:59:17,884 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41817 is added to blk_1073742434_1610 (size=31105) 2024-12-16T17:59:17,884 INFO [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=246, memsize=22.4 K, hasBloomFilter=true, into tmp file hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/7be8b4dcb0e9f81dbd68149495fe8709/.tmp/A/1b5c27f80bc64cf0a27fdbfdfa56dce4 2024-12-16T17:59:17,890 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/7be8b4dcb0e9f81dbd68149495fe8709/.tmp/B/948bd8f14f3f4e96a60572bc3575a135 is 50, key is test_row_0/B:col10/1734371957742/Put/seqid=0 2024-12-16T17:59:17,893 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41817 is added to blk_1073742435_1611 (size=12151) 2024-12-16T17:59:17,894 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=22.36 KB at sequenceid=246 (bloomFilter=true), to=hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/7be8b4dcb0e9f81dbd68149495fe8709/.tmp/B/948bd8f14f3f4e96a60572bc3575a135 2024-12-16T17:59:17,900 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/7be8b4dcb0e9f81dbd68149495fe8709/.tmp/C/286f534b6cb6460fab83b8641863565d is 50, key is test_row_0/C:col10/1734371957742/Put/seqid=0 2024-12-16T17:59:17,903 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41817 is added to blk_1073742436_1612 (size=12151) 2024-12-16T17:59:17,964 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39733 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7be8b4dcb0e9f81dbd68149495fe8709, server=3609ad07831c,39733,1734371789085 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-16T17:59:17,965 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39733 {}] ipc.CallRunner(138): callId: 131 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:47488 deadline: 1734372017963, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7be8b4dcb0e9f81dbd68149495fe8709, server=3609ad07831c,39733,1734371789085 2024-12-16T17:59:17,967 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39733 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7be8b4dcb0e9f81dbd68149495fe8709, server=3609ad07831c,39733,1734371789085 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-16T17:59:17,967 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39733 {}] ipc.CallRunner(138): callId: 145 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:47466 deadline: 1734372017966, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7be8b4dcb0e9f81dbd68149495fe8709, server=3609ad07831c,39733,1734371789085 2024-12-16T17:59:17,968 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39733 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7be8b4dcb0e9f81dbd68149495fe8709, server=3609ad07831c,39733,1734371789085 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-16T17:59:17,968 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39733 {}] ipc.CallRunner(138): callId: 123 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:47440 deadline: 1734372017967, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7be8b4dcb0e9f81dbd68149495fe8709, server=3609ad07831c,39733,1734371789085 2024-12-16T17:59:17,968 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39733 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7be8b4dcb0e9f81dbd68149495fe8709, server=3609ad07831c,39733,1734371789085 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-16T17:59:17,968 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39733 {}] ipc.CallRunner(138): callId: 123 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:47476 deadline: 1734372017967, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7be8b4dcb0e9f81dbd68149495fe8709, server=3609ad07831c,39733,1734371789085 2024-12-16T17:59:17,969 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39733 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7be8b4dcb0e9f81dbd68149495fe8709, server=3609ad07831c,39733,1734371789085 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-16T17:59:17,969 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39733 {}] ipc.CallRunner(138): callId: 122 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:47450 deadline: 1734372017967, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7be8b4dcb0e9f81dbd68149495fe8709, server=3609ad07831c,39733,1734371789085 2024-12-16T17:59:18,167 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39733 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7be8b4dcb0e9f81dbd68149495fe8709, server=3609ad07831c,39733,1734371789085 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-16T17:59:18,168 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39733 {}] ipc.CallRunner(138): callId: 133 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:47488 deadline: 1734372018166, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7be8b4dcb0e9f81dbd68149495fe8709, server=3609ad07831c,39733,1734371789085 2024-12-16T17:59:18,170 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39733 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7be8b4dcb0e9f81dbd68149495fe8709, server=3609ad07831c,39733,1734371789085 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-16T17:59:18,170 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39733 {}] ipc.CallRunner(138): callId: 147 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:47466 deadline: 1734372018169, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7be8b4dcb0e9f81dbd68149495fe8709, server=3609ad07831c,39733,1734371789085 2024-12-16T17:59:18,170 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39733 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7be8b4dcb0e9f81dbd68149495fe8709, server=3609ad07831c,39733,1734371789085 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-16T17:59:18,170 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39733 {}] ipc.CallRunner(138): callId: 125 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:47440 deadline: 1734372018170, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7be8b4dcb0e9f81dbd68149495fe8709, server=3609ad07831c,39733,1734371789085 2024-12-16T17:59:18,171 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39733 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7be8b4dcb0e9f81dbd68149495fe8709, server=3609ad07831c,39733,1734371789085 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-16T17:59:18,171 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39733 {}] ipc.CallRunner(138): callId: 125 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:47476 deadline: 1734372018170, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7be8b4dcb0e9f81dbd68149495fe8709, server=3609ad07831c,39733,1734371789085 2024-12-16T17:59:18,171 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39733 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7be8b4dcb0e9f81dbd68149495fe8709, server=3609ad07831c,39733,1734371789085 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-16T17:59:18,171 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39733 {}] ipc.CallRunner(138): callId: 124 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:47450 deadline: 1734372018170, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7be8b4dcb0e9f81dbd68149495fe8709, server=3609ad07831c,39733,1734371789085 2024-12-16T17:59:18,304 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=22.36 KB at sequenceid=246 (bloomFilter=true), to=hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/7be8b4dcb0e9f81dbd68149495fe8709/.tmp/C/286f534b6cb6460fab83b8641863565d 2024-12-16T17:59:18,307 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/7be8b4dcb0e9f81dbd68149495fe8709/.tmp/A/1b5c27f80bc64cf0a27fdbfdfa56dce4 as hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/7be8b4dcb0e9f81dbd68149495fe8709/A/1b5c27f80bc64cf0a27fdbfdfa56dce4 2024-12-16T17:59:18,310 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/7be8b4dcb0e9f81dbd68149495fe8709/A/1b5c27f80bc64cf0a27fdbfdfa56dce4, entries=150, sequenceid=246, filesize=30.4 K 2024-12-16T17:59:18,310 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/7be8b4dcb0e9f81dbd68149495fe8709/.tmp/B/948bd8f14f3f4e96a60572bc3575a135 as hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/7be8b4dcb0e9f81dbd68149495fe8709/B/948bd8f14f3f4e96a60572bc3575a135 2024-12-16T17:59:18,313 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added 
hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/7be8b4dcb0e9f81dbd68149495fe8709/B/948bd8f14f3f4e96a60572bc3575a135, entries=150, sequenceid=246, filesize=11.9 K 2024-12-16T17:59:18,314 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/7be8b4dcb0e9f81dbd68149495fe8709/.tmp/C/286f534b6cb6460fab83b8641863565d as hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/7be8b4dcb0e9f81dbd68149495fe8709/C/286f534b6cb6460fab83b8641863565d 2024-12-16T17:59:18,317 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/7be8b4dcb0e9f81dbd68149495fe8709/C/286f534b6cb6460fab83b8641863565d, entries=150, sequenceid=246, filesize=11.9 K 2024-12-16T17:59:18,317 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~67.09 KB/68700, heapSize ~176.48 KB/180720, currentSize=134.18 KB/137400 for 7be8b4dcb0e9f81dbd68149495fe8709 in 469ms, sequenceid=246, compaction requested=true 2024-12-16T17:59:18,318 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 7be8b4dcb0e9f81dbd68149495fe8709: 2024-12-16T17:59:18,318 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 7be8b4dcb0e9f81dbd68149495fe8709:A, priority=-2147483648, current under compaction store size is 1 2024-12-16T17:59:18,318 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-16T17:59:18,318 DEBUG [RS:0;3609ad07831c:39733-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-16T17:59:18,318 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 7be8b4dcb0e9f81dbd68149495fe8709:B, priority=-2147483648, current under compaction store size is 2 2024-12-16T17:59:18,318 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-16T17:59:18,318 DEBUG [RS:0;3609ad07831c:39733-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-16T17:59:18,318 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 7be8b4dcb0e9f81dbd68149495fe8709:C, priority=-2147483648, current under compaction store size is 3 2024-12-16T17:59:18,318 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-16T17:59:18,319 DEBUG [RS:0;3609ad07831c:39733-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 36897 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-16T17:59:18,319 DEBUG [RS:0;3609ad07831c:39733-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 
files of size 93759 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-16T17:59:18,319 DEBUG [RS:0;3609ad07831c:39733-longCompactions-0 {}] regionserver.HStore(1540): 7be8b4dcb0e9f81dbd68149495fe8709/B is initiating minor compaction (all files) 2024-12-16T17:59:18,319 DEBUG [RS:0;3609ad07831c:39733-shortCompactions-0 {}] regionserver.HStore(1540): 7be8b4dcb0e9f81dbd68149495fe8709/A is initiating minor compaction (all files) 2024-12-16T17:59:18,319 INFO [RS:0;3609ad07831c:39733-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 7be8b4dcb0e9f81dbd68149495fe8709/B in TestAcidGuarantees,,1734371935330.7be8b4dcb0e9f81dbd68149495fe8709. 2024-12-16T17:59:18,319 INFO [RS:0;3609ad07831c:39733-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 7be8b4dcb0e9f81dbd68149495fe8709/A in TestAcidGuarantees,,1734371935330.7be8b4dcb0e9f81dbd68149495fe8709. 2024-12-16T17:59:18,319 INFO [RS:0;3609ad07831c:39733-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/7be8b4dcb0e9f81dbd68149495fe8709/B/7b24d75874e043dc870454ae65f1e857, hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/7be8b4dcb0e9f81dbd68149495fe8709/B/e013c3af51cb4046aa04f3a06e9903bd, hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/7be8b4dcb0e9f81dbd68149495fe8709/B/948bd8f14f3f4e96a60572bc3575a135] into tmpdir=hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/7be8b4dcb0e9f81dbd68149495fe8709/.tmp, totalSize=36.0 K 2024-12-16T17:59:18,319 INFO [RS:0;3609ad07831c:39733-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/7be8b4dcb0e9f81dbd68149495fe8709/A/078759a43c214e47bc29467cc61c8045, hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/7be8b4dcb0e9f81dbd68149495fe8709/A/a3c130e384034f09a0c6975b2ce836aa, hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/7be8b4dcb0e9f81dbd68149495fe8709/A/1b5c27f80bc64cf0a27fdbfdfa56dce4] into tmpdir=hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/7be8b4dcb0e9f81dbd68149495fe8709/.tmp, totalSize=91.6 K 2024-12-16T17:59:18,319 INFO [RS:0;3609ad07831c:39733-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(181): MOB compaction: major=false isAll=true priority=13 throughput controller=DefaultCompactionThroughputController [maxThroughput=50.00 MB/second, activeCompactions=0] table=TestAcidGuarantees cf=A region=TestAcidGuarantees,,1734371935330.7be8b4dcb0e9f81dbd68149495fe8709. 2024-12-16T17:59:18,319 DEBUG [RS:0;3609ad07831c:39733-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(191): MOB compaction table=TestAcidGuarantees cf=A region=TestAcidGuarantees,,1734371935330.7be8b4dcb0e9f81dbd68149495fe8709. 
files: [hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/7be8b4dcb0e9f81dbd68149495fe8709/A/078759a43c214e47bc29467cc61c8045, hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/7be8b4dcb0e9f81dbd68149495fe8709/A/a3c130e384034f09a0c6975b2ce836aa, hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/7be8b4dcb0e9f81dbd68149495fe8709/A/1b5c27f80bc64cf0a27fdbfdfa56dce4] 2024-12-16T17:59:18,319 DEBUG [RS:0;3609ad07831c:39733-shortCompactions-0 {}] compactions.Compactor(224): Compacting 078759a43c214e47bc29467cc61c8045, keycount=150, bloomtype=ROW, size=30.8 K, encoding=NONE, compression=NONE, seqNum=204, earliestPutTs=1734371955585 2024-12-16T17:59:18,319 DEBUG [RS:0;3609ad07831c:39733-longCompactions-0 {}] compactions.Compactor(224): Compacting 7b24d75874e043dc870454ae65f1e857, keycount=150, bloomtype=ROW, size=12.3 K, encoding=NONE, compression=NONE, seqNum=204, earliestPutTs=1734371955585 2024-12-16T17:59:18,320 DEBUG [RS:0;3609ad07831c:39733-longCompactions-0 {}] compactions.Compactor(224): Compacting e013c3af51cb4046aa04f3a06e9903bd, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=230, earliestPutTs=1734371955610 2024-12-16T17:59:18,320 DEBUG [RS:0;3609ad07831c:39733-shortCompactions-0 {}] compactions.Compactor(224): Compacting a3c130e384034f09a0c6975b2ce836aa, keycount=150, bloomtype=ROW, size=30.4 K, encoding=NONE, compression=NONE, seqNum=230, earliestPutTs=1734371955610 2024-12-16T17:59:18,320 DEBUG [RS:0;3609ad07831c:39733-shortCompactions-0 {}] compactions.Compactor(224): Compacting 1b5c27f80bc64cf0a27fdbfdfa56dce4, keycount=150, bloomtype=ROW, size=30.4 K, encoding=NONE, compression=NONE, seqNum=246, earliestPutTs=1734371957742 2024-12-16T17:59:18,320 DEBUG [RS:0;3609ad07831c:39733-longCompactions-0 {}] compactions.Compactor(224): Compacting 948bd8f14f3f4e96a60572bc3575a135, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=246, earliestPutTs=1734371957742 2024-12-16T17:59:18,325 INFO [RS:0;3609ad07831c:39733-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(322): Compact MOB=false optimized configured=false optimized enabled=false maximum MOB file size=1073741824 major=true store=[table=TestAcidGuarantees family=A region=7be8b4dcb0e9f81dbd68149495fe8709] 2024-12-16T17:59:18,326 INFO [RS:0;3609ad07831c:39733-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 7be8b4dcb0e9f81dbd68149495fe8709#B#compaction#519 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 
1 active operations remaining, total limit is 50.00 MB/second 2024-12-16T17:59:18,327 DEBUG [RS:0;3609ad07831c:39733-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/7be8b4dcb0e9f81dbd68149495fe8709/.tmp/B/14cc1b1874ca4241806d2bf8a8b396ba is 50, key is test_row_0/B:col10/1734371957742/Put/seqid=0 2024-12-16T17:59:18,329 DEBUG [RS:0;3609ad07831c:39733-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(626): New MOB writer created=d41d8cd98f00b204e9800998ecf8427e20241216a6dd250367b445129b0a4d31ad855785_7be8b4dcb0e9f81dbd68149495fe8709 store=[table=TestAcidGuarantees family=A region=7be8b4dcb0e9f81dbd68149495fe8709] 2024-12-16T17:59:18,330 DEBUG [RS:0;3609ad07831c:39733-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(647): Commit or abort size=0 mobCells=0 major=true file=d41d8cd98f00b204e9800998ecf8427e20241216a6dd250367b445129b0a4d31ad855785_7be8b4dcb0e9f81dbd68149495fe8709, store=[table=TestAcidGuarantees family=A region=7be8b4dcb0e9f81dbd68149495fe8709] 2024-12-16T17:59:18,331 DEBUG [RS:0;3609ad07831c:39733-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(658): Aborting writer for hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241216a6dd250367b445129b0a4d31ad855785_7be8b4dcb0e9f81dbd68149495fe8709 because there are no MOB cells, store=[table=TestAcidGuarantees family=A region=7be8b4dcb0e9f81dbd68149495fe8709] 2024-12-16T17:59:18,334 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41817 is added to blk_1073742437_1613 (size=12697) 2024-12-16T17:59:18,338 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41817 is added to blk_1073742438_1614 (size=4469) 2024-12-16T17:59:18,471 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39733 {}] regionserver.HRegion(8581): Flush requested on 7be8b4dcb0e9f81dbd68149495fe8709 2024-12-16T17:59:18,471 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 7be8b4dcb0e9f81dbd68149495fe8709 3/3 column families, dataSize=140.89 KB heapSize=369.89 KB 2024-12-16T17:59:18,472 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 7be8b4dcb0e9f81dbd68149495fe8709, store=A 2024-12-16T17:59:18,472 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-16T17:59:18,472 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 7be8b4dcb0e9f81dbd68149495fe8709, store=B 2024-12-16T17:59:18,472 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-16T17:59:18,472 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 7be8b4dcb0e9f81dbd68149495fe8709, store=C 2024-12-16T17:59:18,472 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-16T17:59:18,477 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202412168d074cb747524407b7a3fc88fa503515_7be8b4dcb0e9f81dbd68149495fe8709 is 50, key is test_row_0/A:col10/1734371958470/Put/seqid=0 2024-12-16T17:59:18,478 WARN 
[RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39733 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7be8b4dcb0e9f81dbd68149495fe8709, server=3609ad07831c,39733,1734371789085 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-16T17:59:18,478 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39733 {}] ipc.CallRunner(138): callId: 128 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:47476 deadline: 1734372018476, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7be8b4dcb0e9f81dbd68149495fe8709, server=3609ad07831c,39733,1734371789085 2024-12-16T17:59:18,479 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39733 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7be8b4dcb0e9f81dbd68149495fe8709, server=3609ad07831c,39733,1734371789085 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-16T17:59:18,479 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39733 {}] ipc.CallRunner(138): callId: 127 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:47450 deadline: 1734372018476, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7be8b4dcb0e9f81dbd68149495fe8709, server=3609ad07831c,39733,1734371789085 2024-12-16T17:59:18,479 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39733 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7be8b4dcb0e9f81dbd68149495fe8709, server=3609ad07831c,39733,1734371789085 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-16T17:59:18,479 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39733 {}] ipc.CallRunner(138): callId: 138 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:47488 deadline: 1734372018477, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7be8b4dcb0e9f81dbd68149495fe8709, server=3609ad07831c,39733,1734371789085 2024-12-16T17:59:18,480 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41817 is added to blk_1073742439_1615 (size=12454) 2024-12-16T17:59:18,480 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39733 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7be8b4dcb0e9f81dbd68149495fe8709, server=3609ad07831c,39733,1734371789085 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-16T17:59:18,481 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39733 {}] ipc.CallRunner(138): callId: 129 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:47440 deadline: 1734372018478, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7be8b4dcb0e9f81dbd68149495fe8709, server=3609ad07831c,39733,1734371789085 2024-12-16T17:59:18,481 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39733 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7be8b4dcb0e9f81dbd68149495fe8709, server=3609ad07831c,39733,1734371789085 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-16T17:59:18,481 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39733 {}] ipc.CallRunner(138): callId: 152 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:47466 deadline: 1734372018478, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7be8b4dcb0e9f81dbd68149495fe8709, server=3609ad07831c,39733,1734371789085 2024-12-16T17:59:18,580 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39733 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7be8b4dcb0e9f81dbd68149495fe8709, server=3609ad07831c,39733,1734371789085 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-16T17:59:18,581 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39733 {}] ipc.CallRunner(138): callId: 130 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:47476 deadline: 1734372018579, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7be8b4dcb0e9f81dbd68149495fe8709, server=3609ad07831c,39733,1734371789085 2024-12-16T17:59:18,581 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39733 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7be8b4dcb0e9f81dbd68149495fe8709, server=3609ad07831c,39733,1734371789085 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-16T17:59:18,581 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39733 {}] ipc.CallRunner(138): callId: 140 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:47488 deadline: 1734372018580, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7be8b4dcb0e9f81dbd68149495fe8709, server=3609ad07831c,39733,1734371789085 2024-12-16T17:59:18,583 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39733 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7be8b4dcb0e9f81dbd68149495fe8709, server=3609ad07831c,39733,1734371789085 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-16T17:59:18,583 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39733 {}] ipc.CallRunner(138): callId: 129 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:47450 deadline: 1734372018581, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7be8b4dcb0e9f81dbd68149495fe8709, server=3609ad07831c,39733,1734371789085 2024-12-16T17:59:18,583 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39733 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7be8b4dcb0e9f81dbd68149495fe8709, server=3609ad07831c,39733,1734371789085 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-16T17:59:18,583 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39733 {}] ipc.CallRunner(138): callId: 131 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:47440 deadline: 1734372018581, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7be8b4dcb0e9f81dbd68149495fe8709, server=3609ad07831c,39733,1734371789085 2024-12-16T17:59:18,583 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39733 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7be8b4dcb0e9f81dbd68149495fe8709, server=3609ad07831c,39733,1734371789085 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-16T17:59:18,583 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39733 {}] ipc.CallRunner(138): callId: 154 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:47466 deadline: 1734372018581, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7be8b4dcb0e9f81dbd68149495fe8709, server=3609ad07831c,39733,1734371789085 2024-12-16T17:59:18,631 DEBUG [Thread-2443 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x63ce3f67 to 127.0.0.1:49190 2024-12-16T17:59:18,631 DEBUG [Thread-2439 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x1b87baa3 to 127.0.0.1:49190 2024-12-16T17:59:18,631 DEBUG [Thread-2437 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x043c1ec4 to 127.0.0.1:49190 2024-12-16T17:59:18,631 DEBUG [Thread-2443 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-16T17:59:18,631 DEBUG [Thread-2439 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-16T17:59:18,631 DEBUG [Thread-2437 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-16T17:59:18,633 DEBUG [Thread-2441 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x1f03b85b to 127.0.0.1:49190 2024-12-16T17:59:18,633 DEBUG [Thread-2441 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-16T17:59:18,633 DEBUG [Thread-2445 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x58752c73 to 127.0.0.1:49190 2024-12-16T17:59:18,633 DEBUG [Thread-2445 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-16T17:59:18,741 INFO [RS:0;3609ad07831c:39733-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 7be8b4dcb0e9f81dbd68149495fe8709#A#compaction#520 average throughput is 0.06 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-12-16T17:59:18,742 DEBUG [RS:0;3609ad07831c:39733-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/7be8b4dcb0e9f81dbd68149495fe8709/.tmp/A/b09a11ddae8943a2bad863180e62330c is 175, key is test_row_0/A:col10/1734371957742/Put/seqid=0 2024-12-16T17:59:18,746 DEBUG [RS:0;3609ad07831c:39733-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/7be8b4dcb0e9f81dbd68149495fe8709/.tmp/B/14cc1b1874ca4241806d2bf8a8b396ba as hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/7be8b4dcb0e9f81dbd68149495fe8709/B/14cc1b1874ca4241806d2bf8a8b396ba 2024-12-16T17:59:18,747 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41817 is added to blk_1073742440_1616 (size=31651) 2024-12-16T17:59:18,750 INFO [RS:0;3609ad07831c:39733-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 7be8b4dcb0e9f81dbd68149495fe8709/B of 7be8b4dcb0e9f81dbd68149495fe8709 into 14cc1b1874ca4241806d2bf8a8b396ba(size=12.4 K), total size for store is 12.4 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-12-16T17:59:18,751 DEBUG [RS:0;3609ad07831c:39733-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 7be8b4dcb0e9f81dbd68149495fe8709: 2024-12-16T17:59:18,751 INFO [RS:0;3609ad07831c:39733-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1734371935330.7be8b4dcb0e9f81dbd68149495fe8709., storeName=7be8b4dcb0e9f81dbd68149495fe8709/B, priority=13, startTime=1734371958318; duration=0sec 2024-12-16T17:59:18,751 DEBUG [RS:0;3609ad07831c:39733-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-16T17:59:18,751 DEBUG [RS:0;3609ad07831c:39733-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 7be8b4dcb0e9f81dbd68149495fe8709:B 2024-12-16T17:59:18,751 DEBUG [RS:0;3609ad07831c:39733-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-16T17:59:18,752 DEBUG [RS:0;3609ad07831c:39733-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 36897 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-16T17:59:18,752 DEBUG [RS:0;3609ad07831c:39733-longCompactions-0 {}] regionserver.HStore(1540): 7be8b4dcb0e9f81dbd68149495fe8709/C is initiating minor compaction (all files) 2024-12-16T17:59:18,752 INFO [RS:0;3609ad07831c:39733-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 7be8b4dcb0e9f81dbd68149495fe8709/C in TestAcidGuarantees,,1734371935330.7be8b4dcb0e9f81dbd68149495fe8709. 
2024-12-16T17:59:18,752 INFO [RS:0;3609ad07831c:39733-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/7be8b4dcb0e9f81dbd68149495fe8709/C/04890b459c2342ad9ec3711d6e338e4f, hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/7be8b4dcb0e9f81dbd68149495fe8709/C/3b5297844056490097b7493934d908b9, hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/7be8b4dcb0e9f81dbd68149495fe8709/C/286f534b6cb6460fab83b8641863565d] into tmpdir=hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/7be8b4dcb0e9f81dbd68149495fe8709/.tmp, totalSize=36.0 K 2024-12-16T17:59:18,752 DEBUG [RS:0;3609ad07831c:39733-longCompactions-0 {}] compactions.Compactor(224): Compacting 04890b459c2342ad9ec3711d6e338e4f, keycount=150, bloomtype=ROW, size=12.3 K, encoding=NONE, compression=NONE, seqNum=204, earliestPutTs=1734371955585 2024-12-16T17:59:18,752 DEBUG [RS:0;3609ad07831c:39733-longCompactions-0 {}] compactions.Compactor(224): Compacting 3b5297844056490097b7493934d908b9, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=230, earliestPutTs=1734371955610 2024-12-16T17:59:18,753 DEBUG [RS:0;3609ad07831c:39733-longCompactions-0 {}] compactions.Compactor(224): Compacting 286f534b6cb6460fab83b8641863565d, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=246, earliestPutTs=1734371957742 2024-12-16T17:59:18,764 INFO [RS:0;3609ad07831c:39733-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 7be8b4dcb0e9f81dbd68149495fe8709#C#compaction#522 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-12-16T17:59:18,765 DEBUG [RS:0;3609ad07831c:39733-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/7be8b4dcb0e9f81dbd68149495fe8709/.tmp/C/e6fe8cd642a247168c93212cc1fa474b is 50, key is test_row_0/C:col10/1734371957742/Put/seqid=0 2024-12-16T17:59:18,768 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41817 is added to blk_1073742441_1617 (size=12697) 2024-12-16T17:59:18,782 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39733 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7be8b4dcb0e9f81dbd68149495fe8709, server=3609ad07831c,39733,1734371789085 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-16T17:59:18,782 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39733 {}] ipc.CallRunner(138): callId: 142 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:47488 deadline: 1734372018782, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7be8b4dcb0e9f81dbd68149495fe8709, server=3609ad07831c,39733,1734371789085 2024-12-16T17:59:18,783 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39733 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7be8b4dcb0e9f81dbd68149495fe8709, server=3609ad07831c,39733,1734371789085 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-16T17:59:18,783 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39733 {}] ipc.CallRunner(138): callId: 132 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:47476 deadline: 1734372018782, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7be8b4dcb0e9f81dbd68149495fe8709, server=3609ad07831c,39733,1734371789085 2024-12-16T17:59:18,784 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39733 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7be8b4dcb0e9f81dbd68149495fe8709, server=3609ad07831c,39733,1734371789085 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-16T17:59:18,784 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39733 {}] ipc.CallRunner(138): callId: 133 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:47440 deadline: 1734372018783, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7be8b4dcb0e9f81dbd68149495fe8709, server=3609ad07831c,39733,1734371789085 2024-12-16T17:59:18,784 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39733 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7be8b4dcb0e9f81dbd68149495fe8709, server=3609ad07831c,39733,1734371789085 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-16T17:59:18,784 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39733 {}] ipc.CallRunner(138): callId: 156 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:47466 deadline: 1734372018784, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7be8b4dcb0e9f81dbd68149495fe8709, server=3609ad07831c,39733,1734371789085 2024-12-16T17:59:18,784 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39733 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7be8b4dcb0e9f81dbd68149495fe8709, server=3609ad07831c,39733,1734371789085 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-16T17:59:18,784 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39733 {}] ipc.CallRunner(138): callId: 131 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:47450 deadline: 1734372018784, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7be8b4dcb0e9f81dbd68149495fe8709, server=3609ad07831c,39733,1734371789085 2024-12-16T17:59:18,881 DEBUG [MemStoreFlusher.0 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-16T17:59:18,883 INFO [MemStoreFlusher.0 {}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202412168d074cb747524407b7a3fc88fa503515_7be8b4dcb0e9f81dbd68149495fe8709 to hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202412168d074cb747524407b7a3fc88fa503515_7be8b4dcb0e9f81dbd68149495fe8709 2024-12-16T17:59:18,884 DEBUG [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/7be8b4dcb0e9f81dbd68149495fe8709/.tmp/A/4e543bf6a8624a65ace2a0941b6042fd, store: [table=TestAcidGuarantees family=A region=7be8b4dcb0e9f81dbd68149495fe8709] 2024-12-16T17:59:18,884 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/7be8b4dcb0e9f81dbd68149495fe8709/.tmp/A/4e543bf6a8624a65ace2a0941b6042fd is 175, key is test_row_0/A:col10/1734371958470/Put/seqid=0 2024-12-16T17:59:18,887 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41817 is added to blk_1073742442_1618 (size=31255) 2024-12-16T17:59:19,085 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39733 {}] regionserver.HRegion(5069): Region is too 
busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7be8b4dcb0e9f81dbd68149495fe8709, server=3609ad07831c,39733,1734371789085 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-16T17:59:19,086 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39733 {}] ipc.CallRunner(138): callId: 134 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:47476 deadline: 1734372019085, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7be8b4dcb0e9f81dbd68149495fe8709, server=3609ad07831c,39733,1734371789085 2024-12-16T17:59:19,087 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39733 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7be8b4dcb0e9f81dbd68149495fe8709, server=3609ad07831c,39733,1734371789085 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-16T17:59:19,087 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39733 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7be8b4dcb0e9f81dbd68149495fe8709, server=3609ad07831c,39733,1734371789085 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-16T17:59:19,087 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39733 {}] ipc.CallRunner(138): callId: 144 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:47488 deadline: 1734372019087, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7be8b4dcb0e9f81dbd68149495fe8709, server=3609ad07831c,39733,1734371789085 2024-12-16T17:59:19,087 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39733 {}] ipc.CallRunner(138): callId: 158 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:47466 deadline: 1734372019087, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7be8b4dcb0e9f81dbd68149495fe8709, server=3609ad07831c,39733,1734371789085 2024-12-16T17:59:19,087 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39733 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7be8b4dcb0e9f81dbd68149495fe8709, server=3609ad07831c,39733,1734371789085 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-16T17:59:19,088 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39733 {}] ipc.CallRunner(138): callId: 133 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:47450 deadline: 1734372019087, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7be8b4dcb0e9f81dbd68149495fe8709, server=3609ad07831c,39733,1734371789085 2024-12-16T17:59:19,088 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39733 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7be8b4dcb0e9f81dbd68149495fe8709, server=3609ad07831c,39733,1734371789085 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-16T17:59:19,088 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39733 {}] ipc.CallRunner(138): callId: 135 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:47440 deadline: 1734372019088, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7be8b4dcb0e9f81dbd68149495fe8709, server=3609ad07831c,39733,1734371789085 2024-12-16T17:59:19,158 DEBUG [RS:0;3609ad07831c:39733-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/7be8b4dcb0e9f81dbd68149495fe8709/.tmp/A/b09a11ddae8943a2bad863180e62330c as hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/7be8b4dcb0e9f81dbd68149495fe8709/A/b09a11ddae8943a2bad863180e62330c 2024-12-16T17:59:19,164 INFO [RS:0;3609ad07831c:39733-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 7be8b4dcb0e9f81dbd68149495fe8709/A of 7be8b4dcb0e9f81dbd68149495fe8709 into b09a11ddae8943a2bad863180e62330c(size=30.9 K), total size for store is 30.9 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-12-16T17:59:19,164 DEBUG [RS:0;3609ad07831c:39733-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 7be8b4dcb0e9f81dbd68149495fe8709: 2024-12-16T17:59:19,164 INFO [RS:0;3609ad07831c:39733-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1734371935330.7be8b4dcb0e9f81dbd68149495fe8709., storeName=7be8b4dcb0e9f81dbd68149495fe8709/A, priority=13, startTime=1734371958318; duration=0sec 2024-12-16T17:59:19,164 DEBUG [RS:0;3609ad07831c:39733-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-16T17:59:19,164 DEBUG [RS:0;3609ad07831c:39733-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 7be8b4dcb0e9f81dbd68149495fe8709:A 2024-12-16T17:59:19,173 DEBUG [RS:0;3609ad07831c:39733-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/7be8b4dcb0e9f81dbd68149495fe8709/.tmp/C/e6fe8cd642a247168c93212cc1fa474b as hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/7be8b4dcb0e9f81dbd68149495fe8709/C/e6fe8cd642a247168c93212cc1fa474b 2024-12-16T17:59:19,179 INFO [RS:0;3609ad07831c:39733-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 7be8b4dcb0e9f81dbd68149495fe8709/C of 7be8b4dcb0e9f81dbd68149495fe8709 into e6fe8cd642a247168c93212cc1fa474b(size=12.4 K), total size for store is 12.4 K. 
This selection was in queue for 0sec, and took 0sec to execute. 2024-12-16T17:59:19,179 DEBUG [RS:0;3609ad07831c:39733-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 7be8b4dcb0e9f81dbd68149495fe8709: 2024-12-16T17:59:19,179 INFO [RS:0;3609ad07831c:39733-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1734371935330.7be8b4dcb0e9f81dbd68149495fe8709., storeName=7be8b4dcb0e9f81dbd68149495fe8709/C, priority=13, startTime=1734371958318; duration=0sec 2024-12-16T17:59:19,179 DEBUG [RS:0;3609ad07831c:39733-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-16T17:59:19,179 DEBUG [RS:0;3609ad07831c:39733-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 7be8b4dcb0e9f81dbd68149495fe8709:C 2024-12-16T17:59:19,289 INFO [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=270, memsize=47.0 K, hasBloomFilter=true, into tmp file hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/7be8b4dcb0e9f81dbd68149495fe8709/.tmp/A/4e543bf6a8624a65ace2a0941b6042fd 2024-12-16T17:59:19,301 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/7be8b4dcb0e9f81dbd68149495fe8709/.tmp/B/15b09166e63d49a79bbf473779619534 is 50, key is test_row_0/B:col10/1734371958470/Put/seqid=0 2024-12-16T17:59:19,305 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41817 is added to blk_1073742443_1619 (size=12301) 2024-12-16T17:59:19,590 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39733 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7be8b4dcb0e9f81dbd68149495fe8709, server=3609ad07831c,39733,1734371789085 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-16T17:59:19,590 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39733 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7be8b4dcb0e9f81dbd68149495fe8709, server=3609ad07831c,39733,1734371789085 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-16T17:59:19,590 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39733 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7be8b4dcb0e9f81dbd68149495fe8709, server=3609ad07831c,39733,1734371789085 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-16T17:59:19,590 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39733 {}] ipc.CallRunner(138): callId: 136 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:47476 deadline: 1734372019589, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7be8b4dcb0e9f81dbd68149495fe8709, server=3609ad07831c,39733,1734371789085 2024-12-16T17:59:19,590 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39733 {}] ipc.CallRunner(138): callId: 160 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:47466 deadline: 1734372019590, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7be8b4dcb0e9f81dbd68149495fe8709, server=3609ad07831c,39733,1734371789085 2024-12-16T17:59:19,590 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39733 {}] ipc.CallRunner(138): callId: 135 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:47450 deadline: 1734372019590, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7be8b4dcb0e9f81dbd68149495fe8709, server=3609ad07831c,39733,1734371789085 2024-12-16T17:59:19,592 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39733 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7be8b4dcb0e9f81dbd68149495fe8709, server=3609ad07831c,39733,1734371789085 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-16T17:59:19,592 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39733 {}] ipc.CallRunner(138): callId: 146 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:47488 deadline: 1734372019591, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7be8b4dcb0e9f81dbd68149495fe8709, server=3609ad07831c,39733,1734371789085 2024-12-16T17:59:19,592 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39733 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7be8b4dcb0e9f81dbd68149495fe8709, server=3609ad07831c,39733,1734371789085 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-16T17:59:19,593 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39733 {}] ipc.CallRunner(138): callId: 137 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:47440 deadline: 1734372019592, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7be8b4dcb0e9f81dbd68149495fe8709, server=3609ad07831c,39733,1734371789085 2024-12-16T17:59:19,681 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38367 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=165 2024-12-16T17:59:19,682 INFO [Thread-2436 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 165 completed 2024-12-16T17:59:19,706 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=46.96 KB at sequenceid=270 (bloomFilter=true), to=hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/7be8b4dcb0e9f81dbd68149495fe8709/.tmp/B/15b09166e63d49a79bbf473779619534 2024-12-16T17:59:19,714 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/7be8b4dcb0e9f81dbd68149495fe8709/.tmp/C/3acb76e9f03640359157c135eeef476a is 50, key is test_row_0/C:col10/1734371958470/Put/seqid=0 2024-12-16T17:59:19,717 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41817 is added to blk_1073742444_1620 (size=12301) 2024-12-16T17:59:20,119 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=46.96 KB at sequenceid=270 (bloomFilter=true), to=hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/7be8b4dcb0e9f81dbd68149495fe8709/.tmp/C/3acb76e9f03640359157c135eeef476a 2024-12-16T17:59:20,130 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/7be8b4dcb0e9f81dbd68149495fe8709/.tmp/A/4e543bf6a8624a65ace2a0941b6042fd as hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/7be8b4dcb0e9f81dbd68149495fe8709/A/4e543bf6a8624a65ace2a0941b6042fd 2024-12-16T17:59:20,134 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/7be8b4dcb0e9f81dbd68149495fe8709/A/4e543bf6a8624a65ace2a0941b6042fd, entries=150, sequenceid=270, filesize=30.5 K 2024-12-16T17:59:20,135 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing 
hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/7be8b4dcb0e9f81dbd68149495fe8709/.tmp/B/15b09166e63d49a79bbf473779619534 as hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/7be8b4dcb0e9f81dbd68149495fe8709/B/15b09166e63d49a79bbf473779619534 2024-12-16T17:59:20,139 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/7be8b4dcb0e9f81dbd68149495fe8709/B/15b09166e63d49a79bbf473779619534, entries=150, sequenceid=270, filesize=12.0 K 2024-12-16T17:59:20,140 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/7be8b4dcb0e9f81dbd68149495fe8709/.tmp/C/3acb76e9f03640359157c135eeef476a as hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/7be8b4dcb0e9f81dbd68149495fe8709/C/3acb76e9f03640359157c135eeef476a 2024-12-16T17:59:20,144 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/7be8b4dcb0e9f81dbd68149495fe8709/C/3acb76e9f03640359157c135eeef476a, entries=150, sequenceid=270, filesize=12.0 K 2024-12-16T17:59:20,145 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~140.89 KB/144270, heapSize ~369.84 KB/378720, currentSize=60.38 KB/61830 for 7be8b4dcb0e9f81dbd68149495fe8709 in 1674ms, sequenceid=270, compaction requested=false 2024-12-16T17:59:20,145 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 7be8b4dcb0e9f81dbd68149495fe8709: 2024-12-16T17:59:20,597 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39733 {}] regionserver.HRegion(8581): Flush requested on 7be8b4dcb0e9f81dbd68149495fe8709 2024-12-16T17:59:20,597 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 7be8b4dcb0e9f81dbd68149495fe8709 3/3 column families, dataSize=67.09 KB heapSize=176.53 KB 2024-12-16T17:59:20,597 DEBUG [Thread-2428 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x5710ea10 to 127.0.0.1:49190 2024-12-16T17:59:20,598 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 7be8b4dcb0e9f81dbd68149495fe8709, store=A 2024-12-16T17:59:20,598 DEBUG [Thread-2428 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-16T17:59:20,598 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-16T17:59:20,598 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 7be8b4dcb0e9f81dbd68149495fe8709, store=B 2024-12-16T17:59:20,598 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-16T17:59:20,599 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 7be8b4dcb0e9f81dbd68149495fe8709, store=C 2024-12-16T17:59:20,599 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-16T17:59:20,600 DEBUG [Thread-2426 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x6c974465 to 127.0.0.1:49190 2024-12-16T17:59:20,600 
DEBUG [Thread-2426 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-16T17:59:20,602 DEBUG [Thread-2432 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x55aec38f to 127.0.0.1:49190 2024-12-16T17:59:20,602 DEBUG [Thread-2432 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-16T17:59:20,602 DEBUG [Thread-2430 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x7c219434 to 127.0.0.1:49190 2024-12-16T17:59:20,602 DEBUG [Thread-2434 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x3051aceb to 127.0.0.1:49190 2024-12-16T17:59:20,602 DEBUG [Thread-2430 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-16T17:59:20,603 DEBUG [Thread-2434 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-16T17:59:20,603 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(392): Finished test. Writers: 2024-12-16T17:59:20,603 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(394): wrote 63 2024-12-16T17:59:20,603 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(394): wrote 39 2024-12-16T17:59:20,603 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(394): wrote 49 2024-12-16T17:59:20,603 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(394): wrote 38 2024-12-16T17:59:20,603 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(394): wrote 40 2024-12-16T17:59:20,603 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(396): Readers: 2024-12-16T17:59:20,603 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(398): read 8073 2024-12-16T17:59:20,603 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(398): read 7784 2024-12-16T17:59:20,603 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(398): read 7682 2024-12-16T17:59:20,603 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(398): read 8021 2024-12-16T17:59:20,603 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(398): read 7817 2024-12-16T17:59:20,603 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(400): Scanners: 2024-12-16T17:59:20,603 INFO [Time-limited test {}] client.ConnectionImplementation(2127): Closing master protocol: MasterService 2024-12-16T17:59:20,603 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x4766354f to 127.0.0.1:49190 2024-12-16T17:59:20,603 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-16T17:59:20,603 INFO [Time-limited test {}] client.HBaseAdmin$18(967): Started disable of TestAcidGuarantees 2024-12-16T17:59:20,604 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38367 {}] master.HMaster$13(2755): Client=jenkins//172.17.0.2 disable TestAcidGuarantees 2024-12-16T17:59:20,604 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38367 {}] procedure2.ProcedureExecutor(1098): Stored pid=167, state=RUNNABLE:DISABLE_TABLE_PREPARE; DisableTableProcedure table=TestAcidGuarantees 2024-12-16T17:59:20,605 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38367 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=167 2024-12-16T17:59:20,606 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e2024121645c8d30d83c24c629efc5036df6327fc_7be8b4dcb0e9f81dbd68149495fe8709 is 50, key is test_row_0/A:col10/1734371958477/Put/seqid=0 2024-12-16T17:59:20,606 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(2113): Put 
{"totalColumns":1,"row":"TestAcidGuarantees","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1734371960606"}]},"ts":"1734371960606"} 2024-12-16T17:59:20,607 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(1655): Updated tableName=TestAcidGuarantees, state=DISABLING in hbase:meta 2024-12-16T17:59:20,608 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41817 is added to blk_1073742445_1621 (size=12454) 2024-12-16T17:59:20,650 INFO [PEWorker-3 {}] procedure.DisableTableProcedure(284): Set TestAcidGuarantees to state=DISABLING 2024-12-16T17:59:20,651 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=168, ppid=167, state=RUNNABLE:CLOSE_TABLE_REGIONS_SCHEDULE; CloseTableRegionsProcedure table=TestAcidGuarantees}] 2024-12-16T17:59:20,653 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=169, ppid=168, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE; TransitRegionStateProcedure table=TestAcidGuarantees, region=7be8b4dcb0e9f81dbd68149495fe8709, UNASSIGN}] 2024-12-16T17:59:20,654 INFO [PEWorker-4 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=169, ppid=168, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE; TransitRegionStateProcedure table=TestAcidGuarantees, region=7be8b4dcb0e9f81dbd68149495fe8709, UNASSIGN 2024-12-16T17:59:20,655 INFO [PEWorker-4 {}] assignment.RegionStateStore(202): pid=169 updating hbase:meta row=7be8b4dcb0e9f81dbd68149495fe8709, regionState=CLOSING, regionLocation=3609ad07831c,39733,1734371789085 2024-12-16T17:59:20,657 DEBUG [PEWorker-4 {}] assignment.TransitRegionStateProcedure(338): Close region: isSplit: false: evictOnSplit: true: evictOnClose: false 2024-12-16T17:59:20,657 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=170, ppid=169, state=RUNNABLE; CloseRegionProcedure 7be8b4dcb0e9f81dbd68149495fe8709, server=3609ad07831c,39733,1734371789085}] 2024-12-16T17:59:20,707 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38367 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=167 2024-12-16T17:59:20,809 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 3609ad07831c,39733,1734371789085 2024-12-16T17:59:20,810 INFO [RS_CLOSE_REGION-regionserver/3609ad07831c:0-0 {event_type=M_RS_CLOSE_REGION, pid=170}] handler.UnassignRegionHandler(124): Close 7be8b4dcb0e9f81dbd68149495fe8709 2024-12-16T17:59:20,811 DEBUG [RS_CLOSE_REGION-regionserver/3609ad07831c:0-0 {event_type=M_RS_CLOSE_REGION, pid=170}] handler.UnassignRegionHandler(138): Unassign region: split region: false: evictCache: false 2024-12-16T17:59:20,811 DEBUG [RS_CLOSE_REGION-regionserver/3609ad07831c:0-0 {event_type=M_RS_CLOSE_REGION, pid=170}] regionserver.HRegion(1681): Closing 7be8b4dcb0e9f81dbd68149495fe8709, disabling compactions & flushes 2024-12-16T17:59:20,811 DEBUG [RS_CLOSE_REGION-regionserver/3609ad07831c:0-0 {event_type=M_RS_CLOSE_REGION, pid=170}] regionserver.HRegion(1942): waiting for 0 compactions & cache flush to complete for region TestAcidGuarantees,,1734371935330.7be8b4dcb0e9f81dbd68149495fe8709. 
2024-12-16T17:59:20,910 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38367 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=167 2024-12-16T17:59:21,009 DEBUG [MemStoreFlusher.0 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-16T17:59:21,012 INFO [MemStoreFlusher.0 {}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e2024121645c8d30d83c24c629efc5036df6327fc_7be8b4dcb0e9f81dbd68149495fe8709 to hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e2024121645c8d30d83c24c629efc5036df6327fc_7be8b4dcb0e9f81dbd68149495fe8709 2024-12-16T17:59:21,012 DEBUG [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/7be8b4dcb0e9f81dbd68149495fe8709/.tmp/A/30e1895cfdac45be97406bd7847ebc3e, store: [table=TestAcidGuarantees family=A region=7be8b4dcb0e9f81dbd68149495fe8709] 2024-12-16T17:59:21,013 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/7be8b4dcb0e9f81dbd68149495fe8709/.tmp/A/30e1895cfdac45be97406bd7847ebc3e is 175, key is test_row_0/A:col10/1734371958477/Put/seqid=0 2024-12-16T17:59:21,015 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41817 is added to blk_1073742446_1622 (size=31255) 2024-12-16T17:59:21,212 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38367 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=167 2024-12-16T17:59:21,417 INFO [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=286, memsize=22.4 K, hasBloomFilter=true, into tmp file hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/7be8b4dcb0e9f81dbd68149495fe8709/.tmp/A/30e1895cfdac45be97406bd7847ebc3e 2024-12-16T17:59:21,430 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/7be8b4dcb0e9f81dbd68149495fe8709/.tmp/B/8f1be81a3c4648f3afb4dd227b32c2a8 is 50, key is test_row_0/B:col10/1734371958477/Put/seqid=0 2024-12-16T17:59:21,434 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41817 is added to blk_1073742447_1623 (size=12301) 2024-12-16T17:59:21,714 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38367 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=167 2024-12-16T17:59:21,835 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=22.36 KB at sequenceid=286 (bloomFilter=true), to=hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/7be8b4dcb0e9f81dbd68149495fe8709/.tmp/B/8f1be81a3c4648f3afb4dd227b32c2a8 2024-12-16T17:59:21,841 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the 
biggest cell in hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/7be8b4dcb0e9f81dbd68149495fe8709/.tmp/C/f2cb81c6d33f4077b4df0fe8fae2f715 is 50, key is test_row_0/C:col10/1734371958477/Put/seqid=0 2024-12-16T17:59:21,847 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41817 is added to blk_1073742448_1624 (size=12301) 2024-12-16T17:59:22,248 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=22.36 KB at sequenceid=286 (bloomFilter=true), to=hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/7be8b4dcb0e9f81dbd68149495fe8709/.tmp/C/f2cb81c6d33f4077b4df0fe8fae2f715 2024-12-16T17:59:22,257 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/7be8b4dcb0e9f81dbd68149495fe8709/.tmp/A/30e1895cfdac45be97406bd7847ebc3e as hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/7be8b4dcb0e9f81dbd68149495fe8709/A/30e1895cfdac45be97406bd7847ebc3e 2024-12-16T17:59:22,262 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/7be8b4dcb0e9f81dbd68149495fe8709/A/30e1895cfdac45be97406bd7847ebc3e, entries=150, sequenceid=286, filesize=30.5 K 2024-12-16T17:59:22,263 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/7be8b4dcb0e9f81dbd68149495fe8709/.tmp/B/8f1be81a3c4648f3afb4dd227b32c2a8 as hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/7be8b4dcb0e9f81dbd68149495fe8709/B/8f1be81a3c4648f3afb4dd227b32c2a8 2024-12-16T17:59:22,266 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/7be8b4dcb0e9f81dbd68149495fe8709/B/8f1be81a3c4648f3afb4dd227b32c2a8, entries=150, sequenceid=286, filesize=12.0 K 2024-12-16T17:59:22,267 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/7be8b4dcb0e9f81dbd68149495fe8709/.tmp/C/f2cb81c6d33f4077b4df0fe8fae2f715 as hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/7be8b4dcb0e9f81dbd68149495fe8709/C/f2cb81c6d33f4077b4df0fe8fae2f715 2024-12-16T17:59:22,270 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/7be8b4dcb0e9f81dbd68149495fe8709/C/f2cb81c6d33f4077b4df0fe8fae2f715, entries=150, sequenceid=286, filesize=12.0 K 2024-12-16T17:59:22,271 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~67.09 KB/68700, heapSize ~176.48 KB/180720, currentSize=26.84 KB/27480 for 7be8b4dcb0e9f81dbd68149495fe8709 in 1674ms, sequenceid=286, compaction requested=true 2024-12-16T17:59:22,271 DEBUG [MemStoreFlusher.0 {}] 
regionserver.HRegion(2538): Flush status journal for 7be8b4dcb0e9f81dbd68149495fe8709: 2024-12-16T17:59:22,271 INFO [RS_CLOSE_REGION-regionserver/3609ad07831c:0-0 {event_type=M_RS_CLOSE_REGION, pid=170}] regionserver.HRegion(1703): Closing region TestAcidGuarantees,,1734371935330.7be8b4dcb0e9f81dbd68149495fe8709. 2024-12-16T17:59:22,271 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 7be8b4dcb0e9f81dbd68149495fe8709:A, priority=-2147483648, current under compaction store size is 1 2024-12-16T17:59:22,271 DEBUG [RS_CLOSE_REGION-regionserver/3609ad07831c:0-0 {event_type=M_RS_CLOSE_REGION, pid=170}] regionserver.HRegion(1724): Waiting without time limit for close lock on TestAcidGuarantees,,1734371935330.7be8b4dcb0e9f81dbd68149495fe8709. 2024-12-16T17:59:22,271 DEBUG [RS:0;3609ad07831c:39733-shortCompactions-0 {}] regionserver.CompactSplit(450): Not compacting TestAcidGuarantees,,1734371935330.7be8b4dcb0e9f81dbd68149495fe8709. because compaction request was cancelled 2024-12-16T17:59:22,271 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-16T17:59:22,271 DEBUG [RS_CLOSE_REGION-regionserver/3609ad07831c:0-0 {event_type=M_RS_CLOSE_REGION, pid=170}] regionserver.HRegion(1791): Acquired close lock on TestAcidGuarantees,,1734371935330.7be8b4dcb0e9f81dbd68149495fe8709. after waiting 0 ms 2024-12-16T17:59:22,271 DEBUG [RS:0;3609ad07831c:39733-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 7be8b4dcb0e9f81dbd68149495fe8709:A 2024-12-16T17:59:22,271 DEBUG [RS_CLOSE_REGION-regionserver/3609ad07831c:0-0 {event_type=M_RS_CLOSE_REGION, pid=170}] regionserver.HRegion(1801): Updates disabled for region TestAcidGuarantees,,1734371935330.7be8b4dcb0e9f81dbd68149495fe8709. 2024-12-16T17:59:22,271 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 7be8b4dcb0e9f81dbd68149495fe8709:B, priority=-2147483648, current under compaction store size is 1 2024-12-16T17:59:22,271 DEBUG [RS:0;3609ad07831c:39733-shortCompactions-0 {}] regionserver.CompactSplit(450): Not compacting TestAcidGuarantees,,1734371935330.7be8b4dcb0e9f81dbd68149495fe8709. because compaction request was cancelled 2024-12-16T17:59:22,271 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-16T17:59:22,271 DEBUG [RS:0;3609ad07831c:39733-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 7be8b4dcb0e9f81dbd68149495fe8709:B 2024-12-16T17:59:22,271 DEBUG [RS:0;3609ad07831c:39733-shortCompactions-0 {}] regionserver.CompactSplit(450): Not compacting TestAcidGuarantees,,1734371935330.7be8b4dcb0e9f81dbd68149495fe8709. 
because compaction request was cancelled 2024-12-16T17:59:22,271 INFO [RS_CLOSE_REGION-regionserver/3609ad07831c:0-0 {event_type=M_RS_CLOSE_REGION, pid=170}] regionserver.HRegion(2837): Flushing 7be8b4dcb0e9f81dbd68149495fe8709 3/3 column families, dataSize=26.84 KB heapSize=71.06 KB 2024-12-16T17:59:22,271 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 7be8b4dcb0e9f81dbd68149495fe8709:C, priority=-2147483648, current under compaction store size is 1 2024-12-16T17:59:22,271 DEBUG [RS:0;3609ad07831c:39733-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 7be8b4dcb0e9f81dbd68149495fe8709:C 2024-12-16T17:59:22,271 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-16T17:59:22,271 DEBUG [RS_CLOSE_REGION-regionserver/3609ad07831c:0-0 {event_type=M_RS_CLOSE_REGION, pid=170}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 7be8b4dcb0e9f81dbd68149495fe8709, store=A 2024-12-16T17:59:22,272 DEBUG [RS_CLOSE_REGION-regionserver/3609ad07831c:0-0 {event_type=M_RS_CLOSE_REGION, pid=170}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-16T17:59:22,272 DEBUG [RS_CLOSE_REGION-regionserver/3609ad07831c:0-0 {event_type=M_RS_CLOSE_REGION, pid=170}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 7be8b4dcb0e9f81dbd68149495fe8709, store=B 2024-12-16T17:59:22,272 DEBUG [RS_CLOSE_REGION-regionserver/3609ad07831c:0-0 {event_type=M_RS_CLOSE_REGION, pid=170}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-16T17:59:22,272 DEBUG [RS_CLOSE_REGION-regionserver/3609ad07831c:0-0 {event_type=M_RS_CLOSE_REGION, pid=170}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 7be8b4dcb0e9f81dbd68149495fe8709, store=C 2024-12-16T17:59:22,272 DEBUG [RS_CLOSE_REGION-regionserver/3609ad07831c:0-0 {event_type=M_RS_CLOSE_REGION, pid=170}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-16T17:59:22,277 DEBUG [RS_CLOSE_REGION-regionserver/3609ad07831c:0-0 {event_type=M_RS_CLOSE_REGION, pid=170}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e2024121643dfa9f1a3464204b78912262ffdf65f_7be8b4dcb0e9f81dbd68149495fe8709 is 50, key is test_row_0/A:col10/1734371960599/Put/seqid=0 2024-12-16T17:59:22,281 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41817 is added to blk_1073742449_1625 (size=9914) 2024-12-16T17:59:22,683 DEBUG [RS_CLOSE_REGION-regionserver/3609ad07831c:0-0 {event_type=M_RS_CLOSE_REGION, pid=170}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-16T17:59:22,690 INFO [RS_CLOSE_REGION-regionserver/3609ad07831c:0-0 {event_type=M_RS_CLOSE_REGION, pid=170}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e2024121643dfa9f1a3464204b78912262ffdf65f_7be8b4dcb0e9f81dbd68149495fe8709 to 
hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e2024121643dfa9f1a3464204b78912262ffdf65f_7be8b4dcb0e9f81dbd68149495fe8709 2024-12-16T17:59:22,691 DEBUG [RS_CLOSE_REGION-regionserver/3609ad07831c:0-0 {event_type=M_RS_CLOSE_REGION, pid=170}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/7be8b4dcb0e9f81dbd68149495fe8709/.tmp/A/bab233e32903466fb3606b86a5c0a5cd, store: [table=TestAcidGuarantees family=A region=7be8b4dcb0e9f81dbd68149495fe8709] 2024-12-16T17:59:22,692 DEBUG [RS_CLOSE_REGION-regionserver/3609ad07831c:0-0 {event_type=M_RS_CLOSE_REGION, pid=170}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/7be8b4dcb0e9f81dbd68149495fe8709/.tmp/A/bab233e32903466fb3606b86a5c0a5cd is 175, key is test_row_0/A:col10/1734371960599/Put/seqid=0 2024-12-16T17:59:22,697 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41817 is added to blk_1073742450_1626 (size=22561) 2024-12-16T17:59:22,716 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38367 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=167 2024-12-16T17:59:23,099 INFO [RS_CLOSE_REGION-regionserver/3609ad07831c:0-0 {event_type=M_RS_CLOSE_REGION, pid=170}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=293, memsize=8.9 K, hasBloomFilter=true, into tmp file hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/7be8b4dcb0e9f81dbd68149495fe8709/.tmp/A/bab233e32903466fb3606b86a5c0a5cd 2024-12-16T17:59:23,131 DEBUG [RS_CLOSE_REGION-regionserver/3609ad07831c:0-0 {event_type=M_RS_CLOSE_REGION, pid=170}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/7be8b4dcb0e9f81dbd68149495fe8709/.tmp/B/7cf215c884df41b2b42296bf8316ba64 is 50, key is test_row_0/B:col10/1734371960599/Put/seqid=0 2024-12-16T17:59:23,135 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41817 is added to blk_1073742451_1627 (size=9857) 2024-12-16T17:59:23,537 INFO [RS_CLOSE_REGION-regionserver/3609ad07831c:0-0 {event_type=M_RS_CLOSE_REGION, pid=170}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=8.95 KB at sequenceid=293 (bloomFilter=true), to=hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/7be8b4dcb0e9f81dbd68149495fe8709/.tmp/B/7cf215c884df41b2b42296bf8316ba64 2024-12-16T17:59:23,549 DEBUG [RS_CLOSE_REGION-regionserver/3609ad07831c:0-0 {event_type=M_RS_CLOSE_REGION, pid=170}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/7be8b4dcb0e9f81dbd68149495fe8709/.tmp/C/d389a20e39404672b53150572d0a7a93 is 50, key is test_row_0/C:col10/1734371960599/Put/seqid=0 2024-12-16T17:59:23,553 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41817 is added to blk_1073742452_1628 (size=9857) 2024-12-16T17:59:23,955 INFO 
[RS_CLOSE_REGION-regionserver/3609ad07831c:0-0 {event_type=M_RS_CLOSE_REGION, pid=170}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=8.95 KB at sequenceid=293 (bloomFilter=true), to=hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/7be8b4dcb0e9f81dbd68149495fe8709/.tmp/C/d389a20e39404672b53150572d0a7a93 2024-12-16T17:59:23,962 DEBUG [RS_CLOSE_REGION-regionserver/3609ad07831c:0-0 {event_type=M_RS_CLOSE_REGION, pid=170}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/7be8b4dcb0e9f81dbd68149495fe8709/.tmp/A/bab233e32903466fb3606b86a5c0a5cd as hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/7be8b4dcb0e9f81dbd68149495fe8709/A/bab233e32903466fb3606b86a5c0a5cd 2024-12-16T17:59:23,966 INFO [RS_CLOSE_REGION-regionserver/3609ad07831c:0-0 {event_type=M_RS_CLOSE_REGION, pid=170}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/7be8b4dcb0e9f81dbd68149495fe8709/A/bab233e32903466fb3606b86a5c0a5cd, entries=100, sequenceid=293, filesize=22.0 K 2024-12-16T17:59:23,967 DEBUG [RS_CLOSE_REGION-regionserver/3609ad07831c:0-0 {event_type=M_RS_CLOSE_REGION, pid=170}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/7be8b4dcb0e9f81dbd68149495fe8709/.tmp/B/7cf215c884df41b2b42296bf8316ba64 as hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/7be8b4dcb0e9f81dbd68149495fe8709/B/7cf215c884df41b2b42296bf8316ba64 2024-12-16T17:59:23,972 INFO [RS_CLOSE_REGION-regionserver/3609ad07831c:0-0 {event_type=M_RS_CLOSE_REGION, pid=170}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/7be8b4dcb0e9f81dbd68149495fe8709/B/7cf215c884df41b2b42296bf8316ba64, entries=100, sequenceid=293, filesize=9.6 K 2024-12-16T17:59:23,973 DEBUG [RS_CLOSE_REGION-regionserver/3609ad07831c:0-0 {event_type=M_RS_CLOSE_REGION, pid=170}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/7be8b4dcb0e9f81dbd68149495fe8709/.tmp/C/d389a20e39404672b53150572d0a7a93 as hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/7be8b4dcb0e9f81dbd68149495fe8709/C/d389a20e39404672b53150572d0a7a93 2024-12-16T17:59:23,978 INFO [RS_CLOSE_REGION-regionserver/3609ad07831c:0-0 {event_type=M_RS_CLOSE_REGION, pid=170}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/7be8b4dcb0e9f81dbd68149495fe8709/C/d389a20e39404672b53150572d0a7a93, entries=100, sequenceid=293, filesize=9.6 K 2024-12-16T17:59:23,978 INFO [RS_CLOSE_REGION-regionserver/3609ad07831c:0-0 {event_type=M_RS_CLOSE_REGION, pid=170}] regionserver.HRegion(3040): Finished flush of dataSize ~26.84 KB/27480, heapSize ~71.02 KB/72720, currentSize=0 B/0 for 7be8b4dcb0e9f81dbd68149495fe8709 in 1707ms, sequenceid=293, compaction requested=true 2024-12-16T17:59:23,979 DEBUG 
[StoreCloser-TestAcidGuarantees,,1734371935330.7be8b4dcb0e9f81dbd68149495fe8709.-1 {}] regionserver.HStore(2316): Moving the files [hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/7be8b4dcb0e9f81dbd68149495fe8709/A/1ed1223a6790412e90e76d0b6198fa3d, hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/7be8b4dcb0e9f81dbd68149495fe8709/A/40d17e76e6bb4faca63a3d0ecc29a7a7, hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/7be8b4dcb0e9f81dbd68149495fe8709/A/2b6f7b9a373844e1a3c4c345a10b83f8, hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/7be8b4dcb0e9f81dbd68149495fe8709/A/19c41e20311d46edbbc336facf20d86a, hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/7be8b4dcb0e9f81dbd68149495fe8709/A/def54049670243a88e25915f9e683774, hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/7be8b4dcb0e9f81dbd68149495fe8709/A/32506f7da57d471d8c2c14c2ed856acc, hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/7be8b4dcb0e9f81dbd68149495fe8709/A/c17282290f744410b475b078c681d763, hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/7be8b4dcb0e9f81dbd68149495fe8709/A/654d1d03ff3d4462b2e814e1350d907f, hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/7be8b4dcb0e9f81dbd68149495fe8709/A/e263399596344eb485b3d79af5977c1b, hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/7be8b4dcb0e9f81dbd68149495fe8709/A/fa7cf489b14244e5ad54681da5c328cb, hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/7be8b4dcb0e9f81dbd68149495fe8709/A/67cac75738674822b7f260b132d6d124, hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/7be8b4dcb0e9f81dbd68149495fe8709/A/f76cf7a2aa7d492b891eb9a1e305f31b, hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/7be8b4dcb0e9f81dbd68149495fe8709/A/a7a24fea12af419cb4678647f565cef4, hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/7be8b4dcb0e9f81dbd68149495fe8709/A/078759a43c214e47bc29467cc61c8045, hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/7be8b4dcb0e9f81dbd68149495fe8709/A/a3c130e384034f09a0c6975b2ce836aa, hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/7be8b4dcb0e9f81dbd68149495fe8709/A/1b5c27f80bc64cf0a27fdbfdfa56dce4] to archive 2024-12-16T17:59:23,980 DEBUG [StoreCloser-TestAcidGuarantees,,1734371935330.7be8b4dcb0e9f81dbd68149495fe8709.-1 {}] backup.HFileArchiver(363): Archiving compacted files. 
2024-12-16T17:59:23,983 DEBUG [HFileArchiver-14 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/7be8b4dcb0e9f81dbd68149495fe8709/A/2b6f7b9a373844e1a3c4c345a10b83f8 to hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/archive/data/default/TestAcidGuarantees/7be8b4dcb0e9f81dbd68149495fe8709/A/2b6f7b9a373844e1a3c4c345a10b83f8 2024-12-16T17:59:23,983 DEBUG [HFileArchiver-9 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/7be8b4dcb0e9f81dbd68149495fe8709/A/1ed1223a6790412e90e76d0b6198fa3d to hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/archive/data/default/TestAcidGuarantees/7be8b4dcb0e9f81dbd68149495fe8709/A/1ed1223a6790412e90e76d0b6198fa3d 2024-12-16T17:59:23,983 DEBUG [HFileArchiver-11 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/7be8b4dcb0e9f81dbd68149495fe8709/A/19c41e20311d46edbbc336facf20d86a to hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/archive/data/default/TestAcidGuarantees/7be8b4dcb0e9f81dbd68149495fe8709/A/19c41e20311d46edbbc336facf20d86a 2024-12-16T17:59:23,983 DEBUG [HFileArchiver-16 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/7be8b4dcb0e9f81dbd68149495fe8709/A/c17282290f744410b475b078c681d763 to hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/archive/data/default/TestAcidGuarantees/7be8b4dcb0e9f81dbd68149495fe8709/A/c17282290f744410b475b078c681d763 2024-12-16T17:59:23,984 DEBUG [HFileArchiver-10 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/7be8b4dcb0e9f81dbd68149495fe8709/A/40d17e76e6bb4faca63a3d0ecc29a7a7 to hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/archive/data/default/TestAcidGuarantees/7be8b4dcb0e9f81dbd68149495fe8709/A/40d17e76e6bb4faca63a3d0ecc29a7a7 2024-12-16T17:59:23,984 DEBUG [HFileArchiver-13 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/7be8b4dcb0e9f81dbd68149495fe8709/A/def54049670243a88e25915f9e683774 to hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/archive/data/default/TestAcidGuarantees/7be8b4dcb0e9f81dbd68149495fe8709/A/def54049670243a88e25915f9e683774 2024-12-16T17:59:23,984 DEBUG [HFileArchiver-12 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/7be8b4dcb0e9f81dbd68149495fe8709/A/654d1d03ff3d4462b2e814e1350d907f to hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/archive/data/default/TestAcidGuarantees/7be8b4dcb0e9f81dbd68149495fe8709/A/654d1d03ff3d4462b2e814e1350d907f 2024-12-16T17:59:23,984 DEBUG [HFileArchiver-15 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, 
hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/7be8b4dcb0e9f81dbd68149495fe8709/A/32506f7da57d471d8c2c14c2ed856acc to hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/archive/data/default/TestAcidGuarantees/7be8b4dcb0e9f81dbd68149495fe8709/A/32506f7da57d471d8c2c14c2ed856acc 2024-12-16T17:59:23,986 DEBUG [HFileArchiver-14 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/7be8b4dcb0e9f81dbd68149495fe8709/A/e263399596344eb485b3d79af5977c1b to hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/archive/data/default/TestAcidGuarantees/7be8b4dcb0e9f81dbd68149495fe8709/A/e263399596344eb485b3d79af5977c1b 2024-12-16T17:59:23,986 DEBUG [HFileArchiver-9 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/7be8b4dcb0e9f81dbd68149495fe8709/A/fa7cf489b14244e5ad54681da5c328cb to hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/archive/data/default/TestAcidGuarantees/7be8b4dcb0e9f81dbd68149495fe8709/A/fa7cf489b14244e5ad54681da5c328cb 2024-12-16T17:59:23,986 DEBUG [HFileArchiver-11 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/7be8b4dcb0e9f81dbd68149495fe8709/A/67cac75738674822b7f260b132d6d124 to hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/archive/data/default/TestAcidGuarantees/7be8b4dcb0e9f81dbd68149495fe8709/A/67cac75738674822b7f260b132d6d124 2024-12-16T17:59:23,986 DEBUG [HFileArchiver-16 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/7be8b4dcb0e9f81dbd68149495fe8709/A/f76cf7a2aa7d492b891eb9a1e305f31b to hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/archive/data/default/TestAcidGuarantees/7be8b4dcb0e9f81dbd68149495fe8709/A/f76cf7a2aa7d492b891eb9a1e305f31b 2024-12-16T17:59:23,986 DEBUG [HFileArchiver-12 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/7be8b4dcb0e9f81dbd68149495fe8709/A/a3c130e384034f09a0c6975b2ce836aa to hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/archive/data/default/TestAcidGuarantees/7be8b4dcb0e9f81dbd68149495fe8709/A/a3c130e384034f09a0c6975b2ce836aa 2024-12-16T17:59:23,986 DEBUG [HFileArchiver-13 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/7be8b4dcb0e9f81dbd68149495fe8709/A/078759a43c214e47bc29467cc61c8045 to hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/archive/data/default/TestAcidGuarantees/7be8b4dcb0e9f81dbd68149495fe8709/A/078759a43c214e47bc29467cc61c8045 2024-12-16T17:59:23,987 DEBUG [HFileArchiver-10 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, 
hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/7be8b4dcb0e9f81dbd68149495fe8709/A/a7a24fea12af419cb4678647f565cef4 to hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/archive/data/default/TestAcidGuarantees/7be8b4dcb0e9f81dbd68149495fe8709/A/a7a24fea12af419cb4678647f565cef4 2024-12-16T17:59:23,987 DEBUG [HFileArchiver-15 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/7be8b4dcb0e9f81dbd68149495fe8709/A/1b5c27f80bc64cf0a27fdbfdfa56dce4 to hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/archive/data/default/TestAcidGuarantees/7be8b4dcb0e9f81dbd68149495fe8709/A/1b5c27f80bc64cf0a27fdbfdfa56dce4 2024-12-16T17:59:23,988 DEBUG [StoreCloser-TestAcidGuarantees,,1734371935330.7be8b4dcb0e9f81dbd68149495fe8709.-1 {}] regionserver.HStore(2316): Moving the files [hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/7be8b4dcb0e9f81dbd68149495fe8709/B/76372f8dc86349f585e7c81e2b3353fd, hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/7be8b4dcb0e9f81dbd68149495fe8709/B/d1f75b2789e040c1bc1e999642231d64, hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/7be8b4dcb0e9f81dbd68149495fe8709/B/57e26f80a1c1428fbe8984bb05ab9f26, hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/7be8b4dcb0e9f81dbd68149495fe8709/B/0a2ede75b1394d6394eae707882c5bfc, hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/7be8b4dcb0e9f81dbd68149495fe8709/B/7e85eff050804fe9baeea2d747ca739c, hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/7be8b4dcb0e9f81dbd68149495fe8709/B/dff7b39b0e544f9cac04220be632a910, hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/7be8b4dcb0e9f81dbd68149495fe8709/B/82d47fafb25a4dd59d3d75c2bb42da90, hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/7be8b4dcb0e9f81dbd68149495fe8709/B/52ea84c84bd64ffc9b1a875bcee0eccc, hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/7be8b4dcb0e9f81dbd68149495fe8709/B/2880418b33c34a6c8b1c061dcfaec1c4, hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/7be8b4dcb0e9f81dbd68149495fe8709/B/0ade4a4441da4359a8717f3bdc0b0236, hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/7be8b4dcb0e9f81dbd68149495fe8709/B/c139640199c142b280fe69e74f662024, hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/7be8b4dcb0e9f81dbd68149495fe8709/B/b904dccbea474fa080b7b585ad011a0e, hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/7be8b4dcb0e9f81dbd68149495fe8709/B/7b24d75874e043dc870454ae65f1e857, 
hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/7be8b4dcb0e9f81dbd68149495fe8709/B/b8851604047145f18936c5800bf36f5c, hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/7be8b4dcb0e9f81dbd68149495fe8709/B/e013c3af51cb4046aa04f3a06e9903bd, hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/7be8b4dcb0e9f81dbd68149495fe8709/B/948bd8f14f3f4e96a60572bc3575a135] to archive 2024-12-16T17:59:23,989 DEBUG [StoreCloser-TestAcidGuarantees,,1734371935330.7be8b4dcb0e9f81dbd68149495fe8709.-1 {}] backup.HFileArchiver(363): Archiving compacted files. 2024-12-16T17:59:23,991 DEBUG [HFileArchiver-16 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/7be8b4dcb0e9f81dbd68149495fe8709/B/0a2ede75b1394d6394eae707882c5bfc to hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/archive/data/default/TestAcidGuarantees/7be8b4dcb0e9f81dbd68149495fe8709/B/0a2ede75b1394d6394eae707882c5bfc 2024-12-16T17:59:23,991 DEBUG [HFileArchiver-14 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/7be8b4dcb0e9f81dbd68149495fe8709/B/76372f8dc86349f585e7c81e2b3353fd to hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/archive/data/default/TestAcidGuarantees/7be8b4dcb0e9f81dbd68149495fe8709/B/76372f8dc86349f585e7c81e2b3353fd 2024-12-16T17:59:23,991 DEBUG [HFileArchiver-11 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/7be8b4dcb0e9f81dbd68149495fe8709/B/57e26f80a1c1428fbe8984bb05ab9f26 to hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/archive/data/default/TestAcidGuarantees/7be8b4dcb0e9f81dbd68149495fe8709/B/57e26f80a1c1428fbe8984bb05ab9f26 2024-12-16T17:59:23,991 DEBUG [HFileArchiver-12 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/7be8b4dcb0e9f81dbd68149495fe8709/B/7e85eff050804fe9baeea2d747ca739c to hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/archive/data/default/TestAcidGuarantees/7be8b4dcb0e9f81dbd68149495fe8709/B/7e85eff050804fe9baeea2d747ca739c 2024-12-16T17:59:23,991 DEBUG [HFileArchiver-9 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/7be8b4dcb0e9f81dbd68149495fe8709/B/d1f75b2789e040c1bc1e999642231d64 to hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/archive/data/default/TestAcidGuarantees/7be8b4dcb0e9f81dbd68149495fe8709/B/d1f75b2789e040c1bc1e999642231d64 2024-12-16T17:59:23,991 DEBUG [HFileArchiver-13 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/7be8b4dcb0e9f81dbd68149495fe8709/B/dff7b39b0e544f9cac04220be632a910 to 
hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/archive/data/default/TestAcidGuarantees/7be8b4dcb0e9f81dbd68149495fe8709/B/dff7b39b0e544f9cac04220be632a910 2024-12-16T17:59:23,991 DEBUG [HFileArchiver-10 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/7be8b4dcb0e9f81dbd68149495fe8709/B/82d47fafb25a4dd59d3d75c2bb42da90 to hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/archive/data/default/TestAcidGuarantees/7be8b4dcb0e9f81dbd68149495fe8709/B/82d47fafb25a4dd59d3d75c2bb42da90 2024-12-16T17:59:23,991 DEBUG [HFileArchiver-15 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/7be8b4dcb0e9f81dbd68149495fe8709/B/52ea84c84bd64ffc9b1a875bcee0eccc to hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/archive/data/default/TestAcidGuarantees/7be8b4dcb0e9f81dbd68149495fe8709/B/52ea84c84bd64ffc9b1a875bcee0eccc 2024-12-16T17:59:23,992 DEBUG [HFileArchiver-16 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/7be8b4dcb0e9f81dbd68149495fe8709/B/2880418b33c34a6c8b1c061dcfaec1c4 to hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/archive/data/default/TestAcidGuarantees/7be8b4dcb0e9f81dbd68149495fe8709/B/2880418b33c34a6c8b1c061dcfaec1c4 2024-12-16T17:59:23,992 DEBUG [HFileArchiver-14 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/7be8b4dcb0e9f81dbd68149495fe8709/B/0ade4a4441da4359a8717f3bdc0b0236 to hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/archive/data/default/TestAcidGuarantees/7be8b4dcb0e9f81dbd68149495fe8709/B/0ade4a4441da4359a8717f3bdc0b0236 2024-12-16T17:59:23,992 DEBUG [HFileArchiver-11 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/7be8b4dcb0e9f81dbd68149495fe8709/B/c139640199c142b280fe69e74f662024 to hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/archive/data/default/TestAcidGuarantees/7be8b4dcb0e9f81dbd68149495fe8709/B/c139640199c142b280fe69e74f662024 2024-12-16T17:59:23,992 DEBUG [HFileArchiver-9 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/7be8b4dcb0e9f81dbd68149495fe8709/B/7b24d75874e043dc870454ae65f1e857 to hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/archive/data/default/TestAcidGuarantees/7be8b4dcb0e9f81dbd68149495fe8709/B/7b24d75874e043dc870454ae65f1e857 2024-12-16T17:59:23,992 DEBUG [HFileArchiver-13 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/7be8b4dcb0e9f81dbd68149495fe8709/B/b8851604047145f18936c5800bf36f5c to 
hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/archive/data/default/TestAcidGuarantees/7be8b4dcb0e9f81dbd68149495fe8709/B/b8851604047145f18936c5800bf36f5c 2024-12-16T17:59:23,992 DEBUG [HFileArchiver-12 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/7be8b4dcb0e9f81dbd68149495fe8709/B/b904dccbea474fa080b7b585ad011a0e to hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/archive/data/default/TestAcidGuarantees/7be8b4dcb0e9f81dbd68149495fe8709/B/b904dccbea474fa080b7b585ad011a0e 2024-12-16T17:59:23,992 DEBUG [HFileArchiver-10 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/7be8b4dcb0e9f81dbd68149495fe8709/B/e013c3af51cb4046aa04f3a06e9903bd to hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/archive/data/default/TestAcidGuarantees/7be8b4dcb0e9f81dbd68149495fe8709/B/e013c3af51cb4046aa04f3a06e9903bd 2024-12-16T17:59:23,992 DEBUG [HFileArchiver-15 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/7be8b4dcb0e9f81dbd68149495fe8709/B/948bd8f14f3f4e96a60572bc3575a135 to hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/archive/data/default/TestAcidGuarantees/7be8b4dcb0e9f81dbd68149495fe8709/B/948bd8f14f3f4e96a60572bc3575a135 2024-12-16T17:59:23,993 DEBUG [StoreCloser-TestAcidGuarantees,,1734371935330.7be8b4dcb0e9f81dbd68149495fe8709.-1 {}] regionserver.HStore(2316): Moving the files [hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/7be8b4dcb0e9f81dbd68149495fe8709/C/235b01cba3ec488ea4d5e39ba1b0dd21, hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/7be8b4dcb0e9f81dbd68149495fe8709/C/637041afafeb4e50a9155f367d4a7358, hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/7be8b4dcb0e9f81dbd68149495fe8709/C/8a80b9aaca434c8a9e7ca7dc01a227ab, hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/7be8b4dcb0e9f81dbd68149495fe8709/C/8a1c7458d62a422893df210aa565ad82, hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/7be8b4dcb0e9f81dbd68149495fe8709/C/c5b719d5f681424f8fd79f7e0073543f, hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/7be8b4dcb0e9f81dbd68149495fe8709/C/6437aec0250e447d9b2892637d6adec7, hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/7be8b4dcb0e9f81dbd68149495fe8709/C/7232039e1bc7415d812fe7c324ea5078, hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/7be8b4dcb0e9f81dbd68149495fe8709/C/e6396044c5b04173879b029440bc8fc0, hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/7be8b4dcb0e9f81dbd68149495fe8709/C/b50d9eb0b1c748a597c3f1076057e435, 
hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/7be8b4dcb0e9f81dbd68149495fe8709/C/a5a68d4ce2294ce2a90aea89376c0da8, hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/7be8b4dcb0e9f81dbd68149495fe8709/C/6c589bc5aa3b4632a21b38ec1f59eb14, hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/7be8b4dcb0e9f81dbd68149495fe8709/C/1c0ea7523e2b471fa7e5d4802e1cba8e, hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/7be8b4dcb0e9f81dbd68149495fe8709/C/04890b459c2342ad9ec3711d6e338e4f, hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/7be8b4dcb0e9f81dbd68149495fe8709/C/436fc035863f4e99834334dc9628efc4, hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/7be8b4dcb0e9f81dbd68149495fe8709/C/3b5297844056490097b7493934d908b9, hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/7be8b4dcb0e9f81dbd68149495fe8709/C/286f534b6cb6460fab83b8641863565d] to archive 2024-12-16T17:59:23,994 DEBUG [StoreCloser-TestAcidGuarantees,,1734371935330.7be8b4dcb0e9f81dbd68149495fe8709.-1 {}] backup.HFileArchiver(363): Archiving compacted files. 2024-12-16T17:59:23,995 DEBUG [HFileArchiver-16 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/7be8b4dcb0e9f81dbd68149495fe8709/C/235b01cba3ec488ea4d5e39ba1b0dd21 to hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/archive/data/default/TestAcidGuarantees/7be8b4dcb0e9f81dbd68149495fe8709/C/235b01cba3ec488ea4d5e39ba1b0dd21 2024-12-16T17:59:23,995 DEBUG [HFileArchiver-14 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/7be8b4dcb0e9f81dbd68149495fe8709/C/637041afafeb4e50a9155f367d4a7358 to hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/archive/data/default/TestAcidGuarantees/7be8b4dcb0e9f81dbd68149495fe8709/C/637041afafeb4e50a9155f367d4a7358 2024-12-16T17:59:23,996 DEBUG [HFileArchiver-11 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/7be8b4dcb0e9f81dbd68149495fe8709/C/8a80b9aaca434c8a9e7ca7dc01a227ab to hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/archive/data/default/TestAcidGuarantees/7be8b4dcb0e9f81dbd68149495fe8709/C/8a80b9aaca434c8a9e7ca7dc01a227ab 2024-12-16T17:59:23,996 DEBUG [HFileArchiver-9 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/7be8b4dcb0e9f81dbd68149495fe8709/C/8a1c7458d62a422893df210aa565ad82 to hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/archive/data/default/TestAcidGuarantees/7be8b4dcb0e9f81dbd68149495fe8709/C/8a1c7458d62a422893df210aa565ad82 2024-12-16T17:59:23,996 DEBUG [HFileArchiver-10 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, 
hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/7be8b4dcb0e9f81dbd68149495fe8709/C/7232039e1bc7415d812fe7c324ea5078 to hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/archive/data/default/TestAcidGuarantees/7be8b4dcb0e9f81dbd68149495fe8709/C/7232039e1bc7415d812fe7c324ea5078 2024-12-16T17:59:23,996 DEBUG [HFileArchiver-12 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/7be8b4dcb0e9f81dbd68149495fe8709/C/6437aec0250e447d9b2892637d6adec7 to hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/archive/data/default/TestAcidGuarantees/7be8b4dcb0e9f81dbd68149495fe8709/C/6437aec0250e447d9b2892637d6adec7 2024-12-16T17:59:23,996 DEBUG [HFileArchiver-15 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/7be8b4dcb0e9f81dbd68149495fe8709/C/e6396044c5b04173879b029440bc8fc0 to hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/archive/data/default/TestAcidGuarantees/7be8b4dcb0e9f81dbd68149495fe8709/C/e6396044c5b04173879b029440bc8fc0 2024-12-16T17:59:23,996 DEBUG [HFileArchiver-13 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/7be8b4dcb0e9f81dbd68149495fe8709/C/c5b719d5f681424f8fd79f7e0073543f to hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/archive/data/default/TestAcidGuarantees/7be8b4dcb0e9f81dbd68149495fe8709/C/c5b719d5f681424f8fd79f7e0073543f 2024-12-16T17:59:23,997 DEBUG [HFileArchiver-16 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/7be8b4dcb0e9f81dbd68149495fe8709/C/b50d9eb0b1c748a597c3f1076057e435 to hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/archive/data/default/TestAcidGuarantees/7be8b4dcb0e9f81dbd68149495fe8709/C/b50d9eb0b1c748a597c3f1076057e435 2024-12-16T17:59:23,997 DEBUG [HFileArchiver-15 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/7be8b4dcb0e9f81dbd68149495fe8709/C/3b5297844056490097b7493934d908b9 to hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/archive/data/default/TestAcidGuarantees/7be8b4dcb0e9f81dbd68149495fe8709/C/3b5297844056490097b7493934d908b9 2024-12-16T17:59:23,997 DEBUG [HFileArchiver-14 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/7be8b4dcb0e9f81dbd68149495fe8709/C/a5a68d4ce2294ce2a90aea89376c0da8 to hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/archive/data/default/TestAcidGuarantees/7be8b4dcb0e9f81dbd68149495fe8709/C/a5a68d4ce2294ce2a90aea89376c0da8 2024-12-16T17:59:23,997 DEBUG [HFileArchiver-12 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, 
hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/7be8b4dcb0e9f81dbd68149495fe8709/C/436fc035863f4e99834334dc9628efc4 to hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/archive/data/default/TestAcidGuarantees/7be8b4dcb0e9f81dbd68149495fe8709/C/436fc035863f4e99834334dc9628efc4 2024-12-16T17:59:23,998 DEBUG [HFileArchiver-10 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/7be8b4dcb0e9f81dbd68149495fe8709/C/04890b459c2342ad9ec3711d6e338e4f to hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/archive/data/default/TestAcidGuarantees/7be8b4dcb0e9f81dbd68149495fe8709/C/04890b459c2342ad9ec3711d6e338e4f 2024-12-16T17:59:23,998 DEBUG [HFileArchiver-11 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/7be8b4dcb0e9f81dbd68149495fe8709/C/6c589bc5aa3b4632a21b38ec1f59eb14 to hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/archive/data/default/TestAcidGuarantees/7be8b4dcb0e9f81dbd68149495fe8709/C/6c589bc5aa3b4632a21b38ec1f59eb14 2024-12-16T17:59:23,998 DEBUG [HFileArchiver-9 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/7be8b4dcb0e9f81dbd68149495fe8709/C/1c0ea7523e2b471fa7e5d4802e1cba8e to hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/archive/data/default/TestAcidGuarantees/7be8b4dcb0e9f81dbd68149495fe8709/C/1c0ea7523e2b471fa7e5d4802e1cba8e 2024-12-16T17:59:23,998 DEBUG [HFileArchiver-13 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/7be8b4dcb0e9f81dbd68149495fe8709/C/286f534b6cb6460fab83b8641863565d to hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/archive/data/default/TestAcidGuarantees/7be8b4dcb0e9f81dbd68149495fe8709/C/286f534b6cb6460fab83b8641863565d 2024-12-16T17:59:24,001 DEBUG [RS_CLOSE_REGION-regionserver/3609ad07831c:0-0 {event_type=M_RS_CLOSE_REGION, pid=170}] wal.WALSplitUtil(409): Wrote file=hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/7be8b4dcb0e9f81dbd68149495fe8709/recovered.edits/296.seqid, newMaxSeqId=296, maxSeqId=4 2024-12-16T17:59:24,001 INFO [RS_CLOSE_REGION-regionserver/3609ad07831c:0-0 {event_type=M_RS_CLOSE_REGION, pid=170}] regionserver.HRegion(1922): Closed TestAcidGuarantees,,1734371935330.7be8b4dcb0e9f81dbd68149495fe8709. 
2024-12-16T17:59:24,001 DEBUG [RS_CLOSE_REGION-regionserver/3609ad07831c:0-0 {event_type=M_RS_CLOSE_REGION, pid=170}] regionserver.HRegion(1635): Region close journal for 7be8b4dcb0e9f81dbd68149495fe8709: 2024-12-16T17:59:24,002 INFO [RS_CLOSE_REGION-regionserver/3609ad07831c:0-0 {event_type=M_RS_CLOSE_REGION, pid=170}] handler.UnassignRegionHandler(170): Closed 7be8b4dcb0e9f81dbd68149495fe8709 2024-12-16T17:59:24,003 INFO [PEWorker-5 {}] assignment.RegionStateStore(202): pid=169 updating hbase:meta row=7be8b4dcb0e9f81dbd68149495fe8709, regionState=CLOSED 2024-12-16T17:59:24,004 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=170, resume processing ppid=169 2024-12-16T17:59:24,004 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=170, ppid=169, state=SUCCESS; CloseRegionProcedure 7be8b4dcb0e9f81dbd68149495fe8709, server=3609ad07831c,39733,1734371789085 in 3.3460 sec 2024-12-16T17:59:24,005 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=169, resume processing ppid=168 2024-12-16T17:59:24,005 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=169, ppid=168, state=SUCCESS; TransitRegionStateProcedure table=TestAcidGuarantees, region=7be8b4dcb0e9f81dbd68149495fe8709, UNASSIGN in 3.3510 sec 2024-12-16T17:59:24,006 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=168, resume processing ppid=167 2024-12-16T17:59:24,006 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=168, ppid=167, state=SUCCESS; CloseTableRegionsProcedure table=TestAcidGuarantees in 3.3540 sec 2024-12-16T17:59:24,007 DEBUG [PEWorker-1 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":1,"row":"TestAcidGuarantees","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1734371964007"}]},"ts":"1734371964007"} 2024-12-16T17:59:24,007 INFO [PEWorker-1 {}] hbase.MetaTableAccessor(1655): Updated tableName=TestAcidGuarantees, state=DISABLED in hbase:meta 2024-12-16T17:59:24,042 INFO [PEWorker-1 {}] procedure.DisableTableProcedure(296): Set TestAcidGuarantees to state=DISABLED 2024-12-16T17:59:24,044 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=167, state=SUCCESS; DisableTableProcedure table=TestAcidGuarantees in 3.4390 sec 2024-12-16T17:59:24,718 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38367 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=167 2024-12-16T17:59:24,718 INFO [Time-limited test {}] client.HBaseAdmin$TableFuture(3751): Operation: DISABLE, Table Name: default:TestAcidGuarantees, procId: 167 completed 2024-12-16T17:59:24,720 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38367 {}] master.HMaster$5(2505): Client=jenkins//172.17.0.2 delete TestAcidGuarantees 2024-12-16T17:59:24,722 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38367 {}] procedure2.ProcedureExecutor(1098): Stored pid=171, state=RUNNABLE:DELETE_TABLE_PRE_OPERATION; DeleteTableProcedure table=TestAcidGuarantees 2024-12-16T17:59:24,724 DEBUG [PEWorker-4 {}] procedure.DeleteTableProcedure(103): Waiting for RIT for pid=171, state=RUNNABLE:DELETE_TABLE_PRE_OPERATION, locked=true; DeleteTableProcedure table=TestAcidGuarantees 2024-12-16T17:59:24,725 DEBUG [PEWorker-4 {}] procedure.DeleteTableProcedure(115): Deleting regions from filesystem for pid=171, state=RUNNABLE:DELETE_TABLE_CLEAR_FS_LAYOUT, locked=true; DeleteTableProcedure table=TestAcidGuarantees 2024-12-16T17:59:24,725 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38367 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=171 2024-12-16T17:59:24,727 DEBUG [HFileArchiver-16 {}] backup.HFileArchiver(133): ARCHIVING hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/7be8b4dcb0e9f81dbd68149495fe8709 2024-12-16T17:59:24,731 DEBUG [HFileArchiver-16 {}] backup.HFileArchiver(161): Archiving [FileablePath, hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/7be8b4dcb0e9f81dbd68149495fe8709/A, FileablePath, hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/7be8b4dcb0e9f81dbd68149495fe8709/B, FileablePath, hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/7be8b4dcb0e9f81dbd68149495fe8709/C, FileablePath, hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/7be8b4dcb0e9f81dbd68149495fe8709/recovered.edits] 2024-12-16T17:59:24,737 DEBUG [HFileArchiver-14 {}] backup.HFileArchiver(620): Archived from FileablePath, hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/7be8b4dcb0e9f81dbd68149495fe8709/A/4e543bf6a8624a65ace2a0941b6042fd to hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/archive/data/default/TestAcidGuarantees/7be8b4dcb0e9f81dbd68149495fe8709/A/4e543bf6a8624a65ace2a0941b6042fd 2024-12-16T17:59:24,737 DEBUG [HFileArchiver-15 {}] backup.HFileArchiver(620): Archived from FileablePath, hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/7be8b4dcb0e9f81dbd68149495fe8709/A/30e1895cfdac45be97406bd7847ebc3e to hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/archive/data/default/TestAcidGuarantees/7be8b4dcb0e9f81dbd68149495fe8709/A/30e1895cfdac45be97406bd7847ebc3e 2024-12-16T17:59:24,737 DEBUG [HFileArchiver-12 {}] backup.HFileArchiver(620): Archived from FileablePath, hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/7be8b4dcb0e9f81dbd68149495fe8709/A/b09a11ddae8943a2bad863180e62330c to hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/archive/data/default/TestAcidGuarantees/7be8b4dcb0e9f81dbd68149495fe8709/A/b09a11ddae8943a2bad863180e62330c 2024-12-16T17:59:24,737 DEBUG [HFileArchiver-10 {}] backup.HFileArchiver(620): Archived from FileablePath, hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/7be8b4dcb0e9f81dbd68149495fe8709/A/bab233e32903466fb3606b86a5c0a5cd to hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/archive/data/default/TestAcidGuarantees/7be8b4dcb0e9f81dbd68149495fe8709/A/bab233e32903466fb3606b86a5c0a5cd 2024-12-16T17:59:24,742 DEBUG [HFileArchiver-11 {}] backup.HFileArchiver(620): Archived from FileablePath, hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/7be8b4dcb0e9f81dbd68149495fe8709/B/14cc1b1874ca4241806d2bf8a8b396ba to 
hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/archive/data/default/TestAcidGuarantees/7be8b4dcb0e9f81dbd68149495fe8709/B/14cc1b1874ca4241806d2bf8a8b396ba 2024-12-16T17:59:24,742 DEBUG [HFileArchiver-9 {}] backup.HFileArchiver(620): Archived from FileablePath, hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/7be8b4dcb0e9f81dbd68149495fe8709/B/15b09166e63d49a79bbf473779619534 to hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/archive/data/default/TestAcidGuarantees/7be8b4dcb0e9f81dbd68149495fe8709/B/15b09166e63d49a79bbf473779619534 2024-12-16T17:59:24,743 DEBUG [HFileArchiver-13 {}] backup.HFileArchiver(620): Archived from FileablePath, hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/7be8b4dcb0e9f81dbd68149495fe8709/B/7cf215c884df41b2b42296bf8316ba64 to hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/archive/data/default/TestAcidGuarantees/7be8b4dcb0e9f81dbd68149495fe8709/B/7cf215c884df41b2b42296bf8316ba64 2024-12-16T17:59:24,743 DEBUG [HFileArchiver-14 {}] backup.HFileArchiver(620): Archived from FileablePath, hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/7be8b4dcb0e9f81dbd68149495fe8709/B/8f1be81a3c4648f3afb4dd227b32c2a8 to hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/archive/data/default/TestAcidGuarantees/7be8b4dcb0e9f81dbd68149495fe8709/B/8f1be81a3c4648f3afb4dd227b32c2a8 2024-12-16T17:59:24,747 DEBUG [HFileArchiver-12 {}] backup.HFileArchiver(620): Archived from FileablePath, hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/7be8b4dcb0e9f81dbd68149495fe8709/C/d389a20e39404672b53150572d0a7a93 to hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/archive/data/default/TestAcidGuarantees/7be8b4dcb0e9f81dbd68149495fe8709/C/d389a20e39404672b53150572d0a7a93 2024-12-16T17:59:24,747 DEBUG [HFileArchiver-15 {}] backup.HFileArchiver(620): Archived from FileablePath, hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/7be8b4dcb0e9f81dbd68149495fe8709/C/3acb76e9f03640359157c135eeef476a to hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/archive/data/default/TestAcidGuarantees/7be8b4dcb0e9f81dbd68149495fe8709/C/3acb76e9f03640359157c135eeef476a 2024-12-16T17:59:24,747 DEBUG [HFileArchiver-10 {}] backup.HFileArchiver(620): Archived from FileablePath, hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/7be8b4dcb0e9f81dbd68149495fe8709/C/e6fe8cd642a247168c93212cc1fa474b to hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/archive/data/default/TestAcidGuarantees/7be8b4dcb0e9f81dbd68149495fe8709/C/e6fe8cd642a247168c93212cc1fa474b 2024-12-16T17:59:24,747 DEBUG [HFileArchiver-11 {}] backup.HFileArchiver(620): Archived from FileablePath, hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/7be8b4dcb0e9f81dbd68149495fe8709/C/f2cb81c6d33f4077b4df0fe8fae2f715 to 
hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/archive/data/default/TestAcidGuarantees/7be8b4dcb0e9f81dbd68149495fe8709/C/f2cb81c6d33f4077b4df0fe8fae2f715 2024-12-16T17:59:24,751 DEBUG [HFileArchiver-9 {}] backup.HFileArchiver(620): Archived from FileablePath, hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/7be8b4dcb0e9f81dbd68149495fe8709/recovered.edits/296.seqid to hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/archive/data/default/TestAcidGuarantees/7be8b4dcb0e9f81dbd68149495fe8709/recovered.edits/296.seqid 2024-12-16T17:59:24,751 DEBUG [HFileArchiver-16 {}] backup.HFileArchiver(634): Deleted hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/default/TestAcidGuarantees/7be8b4dcb0e9f81dbd68149495fe8709 2024-12-16T17:59:24,752 DEBUG [PEWorker-4 {}] procedure.DeleteTableProcedure(313): Archived TestAcidGuarantees regions 2024-12-16T17:59:24,752 DEBUG [PEWorker-4 {}] backup.HFileArchiver(133): ARCHIVING hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3 2024-12-16T17:59:24,753 DEBUG [PEWorker-4 {}] backup.HFileArchiver(161): Archiving [FileablePath, hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A] 2024-12-16T17:59:24,761 DEBUG [HFileArchiver-14 {}] backup.HFileArchiver(620): Archived from FileablePath, hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e2024121643dfa9f1a3464204b78912262ffdf65f_7be8b4dcb0e9f81dbd68149495fe8709 to hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e2024121643dfa9f1a3464204b78912262ffdf65f_7be8b4dcb0e9f81dbd68149495fe8709 2024-12-16T17:59:24,762 DEBUG [HFileArchiver-15 {}] backup.HFileArchiver(620): Archived from FileablePath, hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e2024121670914c6b8b1b424ca8b964802312b62f_7be8b4dcb0e9f81dbd68149495fe8709 to hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e2024121670914c6b8b1b424ca8b964802312b62f_7be8b4dcb0e9f81dbd68149495fe8709 2024-12-16T17:59:24,762 DEBUG [HFileArchiver-13 {}] backup.HFileArchiver(620): Archived from FileablePath, hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241216376b8418c6a745599de980258d415bf6_7be8b4dcb0e9f81dbd68149495fe8709 to hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241216376b8418c6a745599de980258d415bf6_7be8b4dcb0e9f81dbd68149495fe8709 2024-12-16T17:59:24,762 DEBUG [HFileArchiver-10 {}] backup.HFileArchiver(620): Archived from FileablePath, 
hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202412168c50bceddb0a4fc19183a65b3e9f0ef8_7be8b4dcb0e9f81dbd68149495fe8709 to hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202412168c50bceddb0a4fc19183a65b3e9f0ef8_7be8b4dcb0e9f81dbd68149495fe8709 2024-12-16T17:59:24,762 DEBUG [HFileArchiver-12 {}] backup.HFileArchiver(620): Archived from FileablePath, hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e2024121645c8d30d83c24c629efc5036df6327fc_7be8b4dcb0e9f81dbd68149495fe8709 to hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e2024121645c8d30d83c24c629efc5036df6327fc_7be8b4dcb0e9f81dbd68149495fe8709 2024-12-16T17:59:24,762 DEBUG [HFileArchiver-11 {}] backup.HFileArchiver(620): Archived from FileablePath, hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202412168d074cb747524407b7a3fc88fa503515_7be8b4dcb0e9f81dbd68149495fe8709 to hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202412168d074cb747524407b7a3fc88fa503515_7be8b4dcb0e9f81dbd68149495fe8709 2024-12-16T17:59:24,762 DEBUG [HFileArchiver-9 {}] backup.HFileArchiver(620): Archived from FileablePath, hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202412168d86f4082ed7470485a94a4763b639e6_7be8b4dcb0e9f81dbd68149495fe8709 to hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202412168d86f4082ed7470485a94a4763b639e6_7be8b4dcb0e9f81dbd68149495fe8709 2024-12-16T17:59:24,762 DEBUG [HFileArchiver-16 {}] backup.HFileArchiver(620): Archived from FileablePath, hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e2024121696f70ed75bdb4240bd3f38e715749901_7be8b4dcb0e9f81dbd68149495fe8709 to hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e2024121696f70ed75bdb4240bd3f38e715749901_7be8b4dcb0e9f81dbd68149495fe8709 2024-12-16T17:59:24,763 DEBUG [HFileArchiver-14 {}] backup.HFileArchiver(620): Archived from FileablePath, hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202412169a534a254bc14050b82e9f3d70a07f1e_7be8b4dcb0e9f81dbd68149495fe8709 to 
hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202412169a534a254bc14050b82e9f3d70a07f1e_7be8b4dcb0e9f81dbd68149495fe8709 2024-12-16T17:59:24,763 DEBUG [HFileArchiver-15 {}] backup.HFileArchiver(620): Archived from FileablePath, hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241216a672f28c55ed463aa5c1e94b2935f2f1_7be8b4dcb0e9f81dbd68149495fe8709 to hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241216a672f28c55ed463aa5c1e94b2935f2f1_7be8b4dcb0e9f81dbd68149495fe8709 2024-12-16T17:59:24,763 DEBUG [HFileArchiver-13 {}] backup.HFileArchiver(620): Archived from FileablePath, hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241216b6e370d3dd2f43c0b64b503eac49b16e_7be8b4dcb0e9f81dbd68149495fe8709 to hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241216b6e370d3dd2f43c0b64b503eac49b16e_7be8b4dcb0e9f81dbd68149495fe8709 2024-12-16T17:59:24,763 DEBUG [HFileArchiver-10 {}] backup.HFileArchiver(620): Archived from FileablePath, hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241216cbb1a7fb6ebd487eaff1e3f1351cf828_7be8b4dcb0e9f81dbd68149495fe8709 to hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241216cbb1a7fb6ebd487eaff1e3f1351cf828_7be8b4dcb0e9f81dbd68149495fe8709 2024-12-16T17:59:24,764 DEBUG [HFileArchiver-12 {}] backup.HFileArchiver(620): Archived from FileablePath, hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241216e20c943bb02946589cce3800d0721b99_7be8b4dcb0e9f81dbd68149495fe8709 to hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241216e20c943bb02946589cce3800d0721b99_7be8b4dcb0e9f81dbd68149495fe8709 2024-12-16T17:59:24,764 DEBUG [HFileArchiver-11 {}] backup.HFileArchiver(620): Archived from FileablePath, hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241216e53ec42e240e4332a559ef79dda98ad7_7be8b4dcb0e9f81dbd68149495fe8709 to hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241216e53ec42e240e4332a559ef79dda98ad7_7be8b4dcb0e9f81dbd68149495fe8709 2024-12-16T17:59:24,764 DEBUG [HFileArchiver-9 {}] backup.HFileArchiver(620): Archived from FileablePath, 
hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241216ec5f021657ee4907a3a28dcf193df012_7be8b4dcb0e9f81dbd68149495fe8709 to hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241216ec5f021657ee4907a3a28dcf193df012_7be8b4dcb0e9f81dbd68149495fe8709 2024-12-16T17:59:24,764 DEBUG [HFileArchiver-16 {}] backup.HFileArchiver(620): Archived from FileablePath, hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241216edd959f254774483a4f17b0557853492_7be8b4dcb0e9f81dbd68149495fe8709 to hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241216edd959f254774483a4f17b0557853492_7be8b4dcb0e9f81dbd68149495fe8709 2024-12-16T17:59:24,764 DEBUG [PEWorker-4 {}] backup.HFileArchiver(634): Deleted hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3 2024-12-16T17:59:24,766 DEBUG [PEWorker-4 {}] procedure.DeleteTableProcedure(120): Deleting regions from META for pid=171, state=RUNNABLE:DELETE_TABLE_REMOVE_FROM_META, locked=true; DeleteTableProcedure table=TestAcidGuarantees 2024-12-16T17:59:24,769 WARN [PEWorker-4 {}] procedure.DeleteTableProcedure(371): Deleting some vestigial 1 rows of TestAcidGuarantees from hbase:meta 2024-12-16T17:59:24,770 DEBUG [PEWorker-4 {}] procedure.DeleteTableProcedure(408): Removing 'TestAcidGuarantees' descriptor. 2024-12-16T17:59:24,771 DEBUG [PEWorker-4 {}] procedure.DeleteTableProcedure(126): Deleting assignment state for pid=171, state=RUNNABLE:DELETE_TABLE_UNASSIGN_REGIONS, locked=true; DeleteTableProcedure table=TestAcidGuarantees 2024-12-16T17:59:24,771 DEBUG [PEWorker-4 {}] procedure.DeleteTableProcedure(398): Removing 'TestAcidGuarantees' from region states. 2024-12-16T17:59:24,771 DEBUG [PEWorker-4 {}] hbase.MetaTableAccessor(2113): Delete {"totalColumns":1,"row":"TestAcidGuarantees,,1734371935330.7be8b4dcb0e9f81dbd68149495fe8709.","families":{"info":[{"qualifier":"","vlen":0,"tag":[],"timestamp":"1734371964771"}]},"ts":"9223372036854775807"} 2024-12-16T17:59:24,773 INFO [PEWorker-4 {}] hbase.MetaTableAccessor(1808): Deleted 1 regions from META 2024-12-16T17:59:24,773 DEBUG [PEWorker-4 {}] hbase.MetaTableAccessor(1809): Deleted regions: [{ENCODED => 7be8b4dcb0e9f81dbd68149495fe8709, NAME => 'TestAcidGuarantees,,1734371935330.7be8b4dcb0e9f81dbd68149495fe8709.', STARTKEY => '', ENDKEY => ''}] 2024-12-16T17:59:24,773 DEBUG [PEWorker-4 {}] procedure.DeleteTableProcedure(402): Marking 'TestAcidGuarantees' as deleted. 
2024-12-16T17:59:24,773 DEBUG [PEWorker-4 {}] hbase.MetaTableAccessor(2113): Delete {"totalColumns":1,"row":"TestAcidGuarantees","families":{"table":[{"qualifier":"state","vlen":0,"tag":[],"timestamp":"1734371964773"}]},"ts":"9223372036854775807"} 2024-12-16T17:59:24,774 INFO [PEWorker-4 {}] hbase.MetaTableAccessor(1678): Deleted table TestAcidGuarantees state from META 2024-12-16T17:59:24,810 DEBUG [PEWorker-4 {}] procedure.DeleteTableProcedure(133): Finished pid=171, state=RUNNABLE:DELETE_TABLE_POST_OPERATION, locked=true; DeleteTableProcedure table=TestAcidGuarantees 2024-12-16T17:59:24,812 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=171, state=SUCCESS; DeleteTableProcedure table=TestAcidGuarantees in 90 msec 2024-12-16T17:59:24,826 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38367 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=171 2024-12-16T17:59:24,827 INFO [Time-limited test {}] client.HBaseAdmin$TableFuture(3751): Operation: DELETE, Table Name: default:TestAcidGuarantees, procId: 171 completed 2024-12-16T17:59:24,842 INFO [Time-limited test {}] hbase.ResourceChecker(175): after: TestAcidGuaranteesWithAdaptivePolicy#testMobGetAtomicity Thread=247 (was 244) - Thread LEAK? -, OpenFileDescriptor=459 (was 445) - OpenFileDescriptor LEAK? -, MaxFileDescriptor=1048576 (was 1048576), SystemLoadAverage=370 (was 409), ProcessCount=11 (was 11), AvailableMemoryMB=3060 (was 3094) 2024-12-16T17:59:24,842 INFO [Time-limited test {}] hbase.HBaseTestingUtility(1340): Shutting down minicluster 2024-12-16T17:59:24,842 INFO [Time-limited test {}] client.ConnectionImplementation(2127): Closing master protocol: MasterService 2024-12-16T17:59:24,842 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x093315ff to 127.0.0.1:49190 2024-12-16T17:59:24,842 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-16T17:59:24,842 DEBUG [Time-limited test {}] util.JVMClusterUtil(237): Shutting down HBase Cluster 2024-12-16T17:59:24,842 DEBUG [Time-limited test {}] util.JVMClusterUtil(257): Found active master hash=702506913, stopped=false 2024-12-16T17:59:24,843 INFO [Time-limited test {}] master.ServerManager(987): Cluster shutdown requested of master=3609ad07831c,38367,1734371788356 2024-12-16T17:59:24,850 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:38367-0x1002fe073560000, quorum=127.0.0.1:49190, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/running 2024-12-16T17:59:24,850 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:39733-0x1002fe073560001, quorum=127.0.0.1:49190, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/running 2024-12-16T17:59:24,850 INFO [Time-limited test {}] procedure2.ProcedureExecutor(700): Stopping 2024-12-16T17:59:24,850 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:38367-0x1002fe073560000, quorum=127.0.0.1:49190, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-16T17:59:24,850 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:39733-0x1002fe073560001, quorum=127.0.0.1:49190, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-16T17:59:24,851 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 
2024-12-16T17:59:24,851 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): regionserver:39733-0x1002fe073560001, quorum=127.0.0.1:49190, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-12-16T17:59:24,851 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): master:38367-0x1002fe073560000, quorum=127.0.0.1:49190, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-12-16T17:59:24,851 INFO [Time-limited test {}] regionserver.HRegionServer(2561): ***** STOPPING region server '3609ad07831c,39733,1734371789085' ***** 2024-12-16T17:59:24,851 INFO [Time-limited test {}] regionserver.HRegionServer(2575): STOPPED: Shutdown requested 2024-12-16T17:59:24,851 INFO [RS:0;3609ad07831c:39733 {}] regionserver.HeapMemoryManager(220): Stopping 2024-12-16T17:59:24,851 INFO [MemStoreFlusher.0 {}] regionserver.MemStoreFlusher$FlushHandler(373): MemStoreFlusher.0 exiting 2024-12-16T17:59:24,851 INFO [RS:0;3609ad07831c:39733 {}] flush.RegionServerFlushTableProcedureManager(119): Stopping region server flush procedure manager gracefully. 2024-12-16T17:59:24,852 INFO [RS:0;3609ad07831c:39733 {}] snapshot.RegionServerSnapshotManager(137): Stopping RegionServerSnapshotManager gracefully. 2024-12-16T17:59:24,852 INFO [RS:0;3609ad07831c:39733 {}] regionserver.HRegionServer(3579): Received CLOSE for a4053c31d189c903d02c8274354da0e8 2024-12-16T17:59:24,852 INFO [RS:0;3609ad07831c:39733 {}] regionserver.HRegionServer(1224): stopping server 3609ad07831c,39733,1734371789085 2024-12-16T17:59:24,852 DEBUG [RS:0;3609ad07831c:39733 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-16T17:59:24,852 INFO [RS:0;3609ad07831c:39733 {}] regionserver.CompactSplit(469): Waiting for Split Thread to finish... 2024-12-16T17:59:24,852 INFO [RS:0;3609ad07831c:39733 {}] regionserver.CompactSplit(469): Waiting for Large Compaction Thread to finish... 2024-12-16T17:59:24,852 INFO [RS:0;3609ad07831c:39733 {}] regionserver.CompactSplit(469): Waiting for Small Compaction Thread to finish... 2024-12-16T17:59:24,852 INFO [RS:0;3609ad07831c:39733 {}] regionserver.HRegionServer(3579): Received CLOSE for 1588230740 2024-12-16T17:59:24,852 DEBUG [RS_CLOSE_REGION-regionserver/3609ad07831c:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1681): Closing a4053c31d189c903d02c8274354da0e8, disabling compactions & flushes 2024-12-16T17:59:24,853 INFO [RS_CLOSE_REGION-regionserver/3609ad07831c:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1703): Closing region hbase:namespace,,1734371793330.a4053c31d189c903d02c8274354da0e8. 2024-12-16T17:59:24,853 DEBUG [RS_CLOSE_REGION-regionserver/3609ad07831c:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1724): Waiting without time limit for close lock on hbase:namespace,,1734371793330.a4053c31d189c903d02c8274354da0e8. 2024-12-16T17:59:24,853 DEBUG [RS_CLOSE_REGION-regionserver/3609ad07831c:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1791): Acquired close lock on hbase:namespace,,1734371793330.a4053c31d189c903d02c8274354da0e8. after waiting 0 ms 2024-12-16T17:59:24,853 INFO [RS:0;3609ad07831c:39733 {}] regionserver.HRegionServer(1599): Waiting on 2 regions to close 2024-12-16T17:59:24,853 DEBUG [RS_CLOSE_REGION-regionserver/3609ad07831c:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1801): Updates disabled for region hbase:namespace,,1734371793330.a4053c31d189c903d02c8274354da0e8. 
2024-12-16T17:59:24,853 DEBUG [RS:0;3609ad07831c:39733 {}] regionserver.HRegionServer(1603): Online Regions={a4053c31d189c903d02c8274354da0e8=hbase:namespace,,1734371793330.a4053c31d189c903d02c8274354da0e8., 1588230740=hbase:meta,,1.1588230740} 2024-12-16T17:59:24,853 INFO [RS_CLOSE_REGION-regionserver/3609ad07831c:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(2837): Flushing a4053c31d189c903d02c8274354da0e8 1/1 column families, dataSize=78 B heapSize=488 B 2024-12-16T17:59:24,853 DEBUG [RS_CLOSE_META-regionserver/3609ad07831c:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1681): Closing 1588230740, disabling compactions & flushes 2024-12-16T17:59:24,853 INFO [RS_CLOSE_META-regionserver/3609ad07831c:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1703): Closing region hbase:meta,,1.1588230740 2024-12-16T17:59:24,853 DEBUG [RS_CLOSE_META-regionserver/3609ad07831c:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1724): Waiting without time limit for close lock on hbase:meta,,1.1588230740 2024-12-16T17:59:24,853 DEBUG [RS_CLOSE_META-regionserver/3609ad07831c:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1791): Acquired close lock on hbase:meta,,1.1588230740 after waiting 0 ms 2024-12-16T17:59:24,853 DEBUG [RS_CLOSE_META-regionserver/3609ad07831c:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1801): Updates disabled for region hbase:meta,,1.1588230740 2024-12-16T17:59:24,853 INFO [RS_CLOSE_META-regionserver/3609ad07831c:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(2837): Flushing 1588230740 3/3 column families, dataSize=20.55 KB heapSize=35.87 KB 2024-12-16T17:59:24,853 DEBUG [RS:0;3609ad07831c:39733 {}] regionserver.HRegionServer(1629): Waiting on 1588230740, a4053c31d189c903d02c8274354da0e8 2024-12-16T17:59:24,870 DEBUG [RS_CLOSE_REGION-regionserver/3609ad07831c:0-0 {event_type=M_RS_CLOSE_REGION}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/hbase/namespace/a4053c31d189c903d02c8274354da0e8/.tmp/info/feb9d4635e5049bc8e38b59caa9134fe is 45, key is default/info:d/1734371794285/Put/seqid=0 2024-12-16T17:59:24,872 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41817 is added to blk_1073742453_1629 (size=5037) 2024-12-16T17:59:24,874 DEBUG [RS_CLOSE_META-regionserver/3609ad07831c:0-0 {event_type=M_RS_CLOSE_META}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/hbase/meta/1588230740/.tmp/info/0fb68b8f0d864aa99f475b637d4f7d93 is 143, key is hbase:namespace,,1734371793330.a4053c31d189c903d02c8274354da0e8./info:regioninfo/1734371794214/Put/seqid=0 2024-12-16T17:59:24,876 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41817 is added to blk_1073742454_1630 (size=7725) 2024-12-16T17:59:24,906 INFO [regionserver/3609ad07831c:0.leaseChecker {}] regionserver.LeaseManager(133): Closed leases 2024-12-16T17:59:25,054 DEBUG [RS:0;3609ad07831c:39733 {}] regionserver.HRegionServer(1629): Waiting on 1588230740, a4053c31d189c903d02c8274354da0e8 2024-12-16T17:59:25,254 DEBUG [RS:0;3609ad07831c:39733 {}] regionserver.HRegionServer(1629): Waiting on 1588230740, a4053c31d189c903d02c8274354da0e8 2024-12-16T17:59:25,273 INFO [RS_CLOSE_REGION-regionserver/3609ad07831c:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.DefaultStoreFlusher(81): Flushed memstore data 
size=78 B at sequenceid=6 (bloomFilter=true), to=hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/hbase/namespace/a4053c31d189c903d02c8274354da0e8/.tmp/info/feb9d4635e5049bc8e38b59caa9134fe 2024-12-16T17:59:25,277 INFO [RS_CLOSE_META-regionserver/3609ad07831c:0-0 {event_type=M_RS_CLOSE_META}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=17.89 KB at sequenceid=93 (bloomFilter=true), to=hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/hbase/meta/1588230740/.tmp/info/0fb68b8f0d864aa99f475b637d4f7d93 2024-12-16T17:59:25,278 DEBUG [RS_CLOSE_REGION-regionserver/3609ad07831c:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/hbase/namespace/a4053c31d189c903d02c8274354da0e8/.tmp/info/feb9d4635e5049bc8e38b59caa9134fe as hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/hbase/namespace/a4053c31d189c903d02c8274354da0e8/info/feb9d4635e5049bc8e38b59caa9134fe 2024-12-16T17:59:25,283 INFO [RS_CLOSE_REGION-regionserver/3609ad07831c:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/hbase/namespace/a4053c31d189c903d02c8274354da0e8/info/feb9d4635e5049bc8e38b59caa9134fe, entries=2, sequenceid=6, filesize=4.9 K 2024-12-16T17:59:25,284 INFO [RS_CLOSE_REGION-regionserver/3609ad07831c:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(3040): Finished flush of dataSize ~78 B/78, heapSize ~472 B/472, currentSize=0 B/0 for a4053c31d189c903d02c8274354da0e8 in 431ms, sequenceid=6, compaction requested=false 2024-12-16T17:59:25,288 DEBUG [RS_CLOSE_REGION-regionserver/3609ad07831c:0-0 {event_type=M_RS_CLOSE_REGION}] wal.WALSplitUtil(409): Wrote file=hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/hbase/namespace/a4053c31d189c903d02c8274354da0e8/recovered.edits/9.seqid, newMaxSeqId=9, maxSeqId=1 2024-12-16T17:59:25,288 INFO [RS_CLOSE_REGION-regionserver/3609ad07831c:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1922): Closed hbase:namespace,,1734371793330.a4053c31d189c903d02c8274354da0e8. 2024-12-16T17:59:25,288 DEBUG [RS_CLOSE_REGION-regionserver/3609ad07831c:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1635): Region close journal for a4053c31d189c903d02c8274354da0e8: 2024-12-16T17:59:25,289 DEBUG [RS_CLOSE_REGION-regionserver/3609ad07831c:0-0 {event_type=M_RS_CLOSE_REGION}] handler.CloseRegionHandler(117): Closed hbase:namespace,,1734371793330.a4053c31d189c903d02c8274354da0e8. 
2024-12-16T17:59:25,300 DEBUG [RS_CLOSE_META-regionserver/3609ad07831c:0-0 {event_type=M_RS_CLOSE_META}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/hbase/meta/1588230740/.tmp/rep_barrier/85e975e9ffa449c98fe92c6f94453bb9 is 102, key is TestAcidGuarantees,,1734371794518.99318ad6c4e7b8782230d738424ff705./rep_barrier:/1734371820991/DeleteFamily/seqid=0 2024-12-16T17:59:25,302 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41817 is added to blk_1073742455_1631 (size=6025) 2024-12-16T17:59:25,454 DEBUG [RS:0;3609ad07831c:39733 {}] regionserver.HRegionServer(1629): Waiting on 1588230740 2024-12-16T17:59:25,655 DEBUG [RS:0;3609ad07831c:39733 {}] regionserver.HRegionServer(1629): Waiting on 1588230740 2024-12-16T17:59:25,703 INFO [RS_CLOSE_META-regionserver/3609ad07831c:0-0 {event_type=M_RS_CLOSE_META}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=588 B at sequenceid=93 (bloomFilter=true), to=hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/hbase/meta/1588230740/.tmp/rep_barrier/85e975e9ffa449c98fe92c6f94453bb9 2024-12-16T17:59:25,724 DEBUG [RS_CLOSE_META-regionserver/3609ad07831c:0-0 {event_type=M_RS_CLOSE_META}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/hbase/meta/1588230740/.tmp/table/52e2a499ba4442c39573d6571e3bd187 is 96, key is TestAcidGuarantees,,1734371794518.99318ad6c4e7b8782230d738424ff705./table:/1734371820991/DeleteFamily/seqid=0 2024-12-16T17:59:25,727 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41817 is added to blk_1073742456_1632 (size=5942) 2024-12-16T17:59:25,809 INFO [regionserver/3609ad07831c:0.Chore.1 {}] hbase.ScheduledChore(149): Chore: CompactionChecker was stopped 2024-12-16T17:59:25,810 INFO [regionserver/3609ad07831c:0.Chore.1 {}] hbase.ScheduledChore(149): Chore: MemstoreFlusherChore was stopped 2024-12-16T17:59:25,855 INFO [RS:0;3609ad07831c:39733 {}] regionserver.HRegionServer(1599): Waiting on 1 regions to close 2024-12-16T17:59:25,855 DEBUG [RS:0;3609ad07831c:39733 {}] regionserver.HRegionServer(1603): Online Regions={1588230740=hbase:meta,,1.1588230740} 2024-12-16T17:59:25,856 DEBUG [RS:0;3609ad07831c:39733 {}] regionserver.HRegionServer(1629): Waiting on 1588230740 2024-12-16T17:59:26,056 DEBUG [RS:0;3609ad07831c:39733 {}] regionserver.HRegionServer(1629): Waiting on 1588230740 2024-12-16T17:59:26,129 INFO [RS_CLOSE_META-regionserver/3609ad07831c:0-0 {event_type=M_RS_CLOSE_META}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=2.08 KB at sequenceid=93 (bloomFilter=true), to=hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/hbase/meta/1588230740/.tmp/table/52e2a499ba4442c39573d6571e3bd187 2024-12-16T17:59:26,138 DEBUG [RS_CLOSE_META-regionserver/3609ad07831c:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/hbase/meta/1588230740/.tmp/info/0fb68b8f0d864aa99f475b637d4f7d93 as hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/hbase/meta/1588230740/info/0fb68b8f0d864aa99f475b637d4f7d93 2024-12-16T17:59:26,142 INFO [RS_CLOSE_META-regionserver/3609ad07831c:0-0 {event_type=M_RS_CLOSE_META}] 
regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/hbase/meta/1588230740/info/0fb68b8f0d864aa99f475b637d4f7d93, entries=22, sequenceid=93, filesize=7.5 K 2024-12-16T17:59:26,143 DEBUG [RS_CLOSE_META-regionserver/3609ad07831c:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/hbase/meta/1588230740/.tmp/rep_barrier/85e975e9ffa449c98fe92c6f94453bb9 as hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/hbase/meta/1588230740/rep_barrier/85e975e9ffa449c98fe92c6f94453bb9 2024-12-16T17:59:26,146 INFO [RS_CLOSE_META-regionserver/3609ad07831c:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/hbase/meta/1588230740/rep_barrier/85e975e9ffa449c98fe92c6f94453bb9, entries=6, sequenceid=93, filesize=5.9 K 2024-12-16T17:59:26,147 DEBUG [RS_CLOSE_META-regionserver/3609ad07831c:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/hbase/meta/1588230740/.tmp/table/52e2a499ba4442c39573d6571e3bd187 as hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/hbase/meta/1588230740/table/52e2a499ba4442c39573d6571e3bd187 2024-12-16T17:59:26,150 INFO [RS_CLOSE_META-regionserver/3609ad07831c:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/hbase/meta/1588230740/table/52e2a499ba4442c39573d6571e3bd187, entries=9, sequenceid=93, filesize=5.8 K 2024-12-16T17:59:26,151 INFO [RS_CLOSE_META-regionserver/3609ad07831c:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(3040): Finished flush of dataSize ~20.55 KB/21040, heapSize ~35.82 KB/36680, currentSize=0 B/0 for 1588230740 in 1297ms, sequenceid=93, compaction requested=false 2024-12-16T17:59:26,155 DEBUG [RS_CLOSE_META-regionserver/3609ad07831c:0-0 {event_type=M_RS_CLOSE_META}] wal.WALSplitUtil(409): Wrote file=hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/data/hbase/meta/1588230740/recovered.edits/96.seqid, newMaxSeqId=96, maxSeqId=1 2024-12-16T17:59:26,155 DEBUG [RS_CLOSE_META-regionserver/3609ad07831c:0-0 {event_type=M_RS_CLOSE_META}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint 2024-12-16T17:59:26,155 INFO [RS_CLOSE_META-regionserver/3609ad07831c:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1922): Closed hbase:meta,,1.1588230740 2024-12-16T17:59:26,156 DEBUG [RS_CLOSE_META-regionserver/3609ad07831c:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1635): Region close journal for 1588230740: 2024-12-16T17:59:26,156 DEBUG [RS_CLOSE_META-regionserver/3609ad07831c:0-0 {event_type=M_RS_CLOSE_META}] handler.CloseRegionHandler(117): Closed hbase:meta,,1.1588230740 2024-12-16T17:59:26,256 INFO [RS:0;3609ad07831c:39733 {}] regionserver.HRegionServer(1250): stopping server 3609ad07831c,39733,1734371789085; all regions closed. 
2024-12-16T17:59:26,265 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41817 is added to blk_1073741834_1010 (size=26050) 2024-12-16T17:59:26,268 DEBUG [RS:0;3609ad07831c:39733 {}] wal.AbstractFSWAL(1071): Moved 1 WAL file(s) to /user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/oldWALs 2024-12-16T17:59:26,268 INFO [RS:0;3609ad07831c:39733 {}] wal.AbstractFSWAL(1074): Closed WAL: AsyncFSWAL 3609ad07831c%2C39733%2C1734371789085.meta:.meta(num 1734371793072) 2024-12-16T17:59:26,271 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41817 is added to blk_1073741832_1008 (size=14775402) 2024-12-16T17:59:26,273 DEBUG [RS:0;3609ad07831c:39733 {}] wal.AbstractFSWAL(1071): Moved 1 WAL file(s) to /user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/oldWALs 2024-12-16T17:59:26,273 INFO [RS:0;3609ad07831c:39733 {}] wal.AbstractFSWAL(1074): Closed WAL: AsyncFSWAL 3609ad07831c%2C39733%2C1734371789085:(num 1734371791958) 2024-12-16T17:59:26,273 DEBUG [RS:0;3609ad07831c:39733 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-16T17:59:26,273 INFO [RS:0;3609ad07831c:39733 {}] regionserver.LeaseManager(133): Closed leases 2024-12-16T17:59:26,273 INFO [RS:0;3609ad07831c:39733 {}] hbase.ChoreService(370): Chore service for: regionserver/3609ad07831c:0 had [ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS, ScheduledChore name=BrokenStoreFileCleaner, period=21600000, unit=MILLISECONDS] on shutdown 2024-12-16T17:59:26,274 INFO [regionserver/3609ad07831c:0.logRoller {}] wal.AbstractWALRoller(243): LogRoller exiting. 2024-12-16T17:59:26,274 INFO [RS:0;3609ad07831c:39733 {}] ipc.NettyRpcServer(351): Stopping server on /172.17.0.2:39733 2024-12-16T17:59:26,309 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:39733-0x1002fe073560001, quorum=127.0.0.1:49190, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/rs/3609ad07831c,39733,1734371789085 2024-12-16T17:59:26,309 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:38367-0x1002fe073560000, quorum=127.0.0.1:49190, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs 2024-12-16T17:59:26,317 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(171): RegionServer ephemeral node deleted, processing expiration [3609ad07831c,39733,1734371789085] 2024-12-16T17:59:26,317 DEBUG [RegionServerTracker-0 {}] master.DeadServer(103): Processing 3609ad07831c,39733,1734371789085; numProcessing=1 2024-12-16T17:59:26,325 DEBUG [RegionServerTracker-0 {}] zookeeper.RecoverableZooKeeper(215): Node /hbase/draining/3609ad07831c,39733,1734371789085 already deleted, retry=false 2024-12-16T17:59:26,325 INFO [RegionServerTracker-0 {}] master.ServerManager(652): Cluster shutdown set; 3609ad07831c,39733,1734371789085 expired; onlineServers=0 2024-12-16T17:59:26,326 INFO [RegionServerTracker-0 {}] regionserver.HRegionServer(2561): ***** STOPPING region server '3609ad07831c,38367,1734371788356' ***** 2024-12-16T17:59:26,326 INFO [RegionServerTracker-0 {}] regionserver.HRegionServer(2575): STOPPED: Cluster shutdown set; onlineServer=0 2024-12-16T17:59:26,326 DEBUG [M:0;3609ad07831c:38367 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@6956a245, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, 
minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=3609ad07831c/172.17.0.2:0 2024-12-16T17:59:26,327 INFO [M:0;3609ad07831c:38367 {}] regionserver.HRegionServer(1224): stopping server 3609ad07831c,38367,1734371788356 2024-12-16T17:59:26,327 INFO [M:0;3609ad07831c:38367 {}] regionserver.HRegionServer(1250): stopping server 3609ad07831c,38367,1734371788356; all regions closed. 2024-12-16T17:59:26,327 DEBUG [M:0;3609ad07831c:38367 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-16T17:59:26,327 DEBUG [M:0;3609ad07831c:38367 {}] cleaner.LogCleaner(198): Cancelling LogCleaner 2024-12-16T17:59:26,327 DEBUG [M:0;3609ad07831c:38367 {}] cleaner.HFileCleaner(335): Stopping file delete threads 2024-12-16T17:59:26,327 WARN [OldWALsCleaner-0 {}] cleaner.LogCleaner(186): Interrupted while cleaning old WALs, will try to clean it next round. Exiting. 2024-12-16T17:59:26,327 DEBUG [master/3609ad07831c:0:becomeActiveMaster-HFileCleaner.small.0-1734371791705 {}] cleaner.HFileCleaner(306): Exit Thread[master/3609ad07831c:0:becomeActiveMaster-HFileCleaner.small.0-1734371791705,5,FailOnTimeoutGroup] 2024-12-16T17:59:26,327 DEBUG [master/3609ad07831c:0:becomeActiveMaster-HFileCleaner.large.0-1734371791704 {}] cleaner.HFileCleaner(306): Exit Thread[master/3609ad07831c:0:becomeActiveMaster-HFileCleaner.large.0-1734371791704,5,FailOnTimeoutGroup] 2024-12-16T17:59:26,328 INFO [M:0;3609ad07831c:38367 {}] hbase.ChoreService(370): Chore service for: master/3609ad07831c:0 had [] on shutdown 2024-12-16T17:59:26,328 DEBUG [M:0;3609ad07831c:38367 {}] master.HMaster(1733): Stopping service threads 2024-12-16T17:59:26,328 INFO [M:0;3609ad07831c:38367 {}] procedure2.RemoteProcedureDispatcher(119): Stopping procedure remote dispatcher 2024-12-16T17:59:26,329 ERROR [M:0;3609ad07831c:38367 {}] procedure2.ProcedureExecutor(722): There are still active thread in group java.lang.ThreadGroup[name=PEWorkerGroup,maxpri=10], see STDOUT java.lang.ThreadGroup[name=PEWorkerGroup,maxpri=10] Thread[IPC Client (1600459565) connection to localhost/127.0.0.1:40431 from jenkins,5,PEWorkerGroup] Thread[IPC Parameter Sending Thread for localhost/127.0.0.1:40431,5,PEWorkerGroup] 2024-12-16T17:59:26,330 INFO [M:0;3609ad07831c:38367 {}] region.RegionProcedureStore(113): Stopping the Region Procedure Store, isAbort=false 2024-12-16T17:59:26,330 DEBUG [normalizer-worker-0 {}] normalizer.RegionNormalizerWorker(193): interrupt detected. terminating. 
2024-12-16T17:59:26,334 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:38367-0x1002fe073560000, quorum=127.0.0.1:49190, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/master 2024-12-16T17:59:26,334 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:38367-0x1002fe073560000, quorum=127.0.0.1:49190, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-16T17:59:26,334 DEBUG [M:0;3609ad07831c:38367 {}] zookeeper.ZKUtil(347): master:38367-0x1002fe073560000, quorum=127.0.0.1:49190, baseZNode=/hbase Unable to get data of znode /hbase/master because node does not exist (not an error) 2024-12-16T17:59:26,334 WARN [M:0;3609ad07831c:38367 {}] master.ActiveMasterManager(344): Failed get of master address: java.io.IOException: Can't get master address from ZooKeeper; znode data == null 2024-12-16T17:59:26,334 INFO [M:0;3609ad07831c:38367 {}] assignment.AssignmentManager(391): Stopping assignment manager 2024-12-16T17:59:26,334 INFO [M:0;3609ad07831c:38367 {}] region.MasterRegion(195): Closing local region {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''}, isAbort=false 2024-12-16T17:59:26,334 DEBUG [M:0;3609ad07831c:38367 {}] regionserver.HRegion(1681): Closing 1595e783b53d99cd5eef43b6debb2682, disabling compactions & flushes 2024-12-16T17:59:26,334 INFO [M:0;3609ad07831c:38367 {}] regionserver.HRegion(1703): Closing region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-12-16T17:59:26,335 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): master:38367-0x1002fe073560000, quorum=127.0.0.1:49190, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/master 2024-12-16T17:59:26,335 DEBUG [M:0;3609ad07831c:38367 {}] regionserver.HRegion(1724): Waiting without time limit for close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-12-16T17:59:26,335 DEBUG [M:0;3609ad07831c:38367 {}] regionserver.HRegion(1791): Acquired close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. after waiting 0 ms 2024-12-16T17:59:26,335 DEBUG [M:0;3609ad07831c:38367 {}] regionserver.HRegion(1801): Updates disabled for region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-12-16T17:59:26,335 INFO [M:0;3609ad07831c:38367 {}] regionserver.HRegion(2837): Flushing 1595e783b53d99cd5eef43b6debb2682 4/4 column families, dataSize=744.82 KB heapSize=913.94 KB 2024-12-16T17:59:26,349 DEBUG [M:0;3609ad07831c:38367 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/045b60db51ab4d7e885f4780a0420264 is 82, key is hbase:meta,,1/info:regioninfo/1734371793183/Put/seqid=0 2024-12-16T17:59:26,352 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41817 is added to blk_1073742457_1633 (size=5672) 2024-12-16T17:59:26,417 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:39733-0x1002fe073560001, quorum=127.0.0.1:49190, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-12-16T17:59:26,417 INFO [RS:0;3609ad07831c:39733 {}] regionserver.HRegionServer(1307): Exiting; stopping=3609ad07831c,39733,1734371789085; zookeeper connection closed. 
2024-12-16T17:59:26,417 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:39733-0x1002fe073560001, quorum=127.0.0.1:49190, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-12-16T17:59:26,418 INFO [Shutdown of org.apache.hadoop.hbase.fs.HFileSystem@5c56573b {}] hbase.MiniHBaseCluster$SingleFileSystemShutdownThread(216): Hook closing fs=org.apache.hadoop.hbase.fs.HFileSystem@5c56573b 2024-12-16T17:59:26,419 INFO [Time-limited test {}] util.JVMClusterUtil(335): Shutdown of 1 master(s) and 1 regionserver(s) complete 2024-12-16T17:59:26,754 INFO [M:0;3609ad07831c:38367 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=504 B at sequenceid=2084 (bloomFilter=true), to=hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/045b60db51ab4d7e885f4780a0420264 2024-12-16T17:59:26,784 DEBUG [M:0;3609ad07831c:38367 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/c98fddb42ecd4025b282d054cb3d1122 is 2283, key is \x00\x00\x00\x00\x00\x00\x00\x92/proc:d/1734371938512/Put/seqid=0 2024-12-16T17:59:26,787 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41817 is added to blk_1073742458_1634 (size=43595) 2024-12-16T17:59:27,188 INFO [M:0;3609ad07831c:38367 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=744.27 KB at sequenceid=2084 (bloomFilter=true), to=hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/c98fddb42ecd4025b282d054cb3d1122 2024-12-16T17:59:27,191 INFO [M:0;3609ad07831c:38367 {}] regionserver.StoreFileReader(539): Loaded Delete Family Bloom (CompoundBloomFilter) metadata for c98fddb42ecd4025b282d054cb3d1122 2024-12-16T17:59:27,204 DEBUG [M:0;3609ad07831c:38367 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/00c15254ae9a4ad4a46c4261e3bc97a1 is 69, key is 3609ad07831c,39733,1734371789085/rs:state/1734371791727/Put/seqid=0 2024-12-16T17:59:27,207 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41817 is added to blk_1073742459_1635 (size=5156) 2024-12-16T17:59:27,536 DEBUG [FsDatasetAsyncDiskServiceFixer {}] hbase.HBaseTestingUtility$FsDatasetAsyncDiskServiceFixer(620): NoSuchFieldException: threadGroup; It might because your Hadoop version > 3.2.3 or 3.3.4, See HBASE-27595 for details. 
2024-12-16T17:59:27,609 INFO [M:0;3609ad07831c:38367 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=65 B at sequenceid=2084 (bloomFilter=true), to=hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/00c15254ae9a4ad4a46c4261e3bc97a1 2024-12-16T17:59:27,618 DEBUG [M:0;3609ad07831c:38367 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/045b60db51ab4d7e885f4780a0420264 as hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/info/045b60db51ab4d7e885f4780a0420264 2024-12-16T17:59:27,623 INFO [M:0;3609ad07831c:38367 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/info/045b60db51ab4d7e885f4780a0420264, entries=8, sequenceid=2084, filesize=5.5 K 2024-12-16T17:59:27,623 DEBUG [M:0;3609ad07831c:38367 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/c98fddb42ecd4025b282d054cb3d1122 as hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/proc/c98fddb42ecd4025b282d054cb3d1122 2024-12-16T17:59:27,626 INFO [M:0;3609ad07831c:38367 {}] regionserver.StoreFileReader(539): Loaded Delete Family Bloom (CompoundBloomFilter) metadata for c98fddb42ecd4025b282d054cb3d1122 2024-12-16T17:59:27,626 INFO [M:0;3609ad07831c:38367 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/proc/c98fddb42ecd4025b282d054cb3d1122, entries=171, sequenceid=2084, filesize=42.6 K 2024-12-16T17:59:27,627 DEBUG [M:0;3609ad07831c:38367 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/00c15254ae9a4ad4a46c4261e3bc97a1 as hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/rs/00c15254ae9a4ad4a46c4261e3bc97a1 2024-12-16T17:59:27,629 INFO [M:0;3609ad07831c:38367 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40431/user/jenkins/test-data/cb3f7d40-ec0a-58ee-108c-262ea8dac2b4/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/rs/00c15254ae9a4ad4a46c4261e3bc97a1, entries=1, sequenceid=2084, filesize=5.0 K 2024-12-16T17:59:27,630 INFO [M:0;3609ad07831c:38367 {}] regionserver.HRegion(3040): Finished flush of dataSize ~744.82 KB/762699, heapSize ~913.64 KB/935568, currentSize=0 B/0 for 1595e783b53d99cd5eef43b6debb2682 in 1295ms, sequenceid=2084, compaction requested=false 2024-12-16T17:59:27,631 INFO [M:0;3609ad07831c:38367 {}] regionserver.HRegion(1922): Closed master:store,,1.1595e783b53d99cd5eef43b6debb2682. 
2024-12-16T17:59:27,631 DEBUG [M:0;3609ad07831c:38367 {}] regionserver.HRegion(1635): Region close journal for 1595e783b53d99cd5eef43b6debb2682: 2024-12-16T17:59:27,633 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41817 is added to blk_1073741830_1006 (size=899366) 2024-12-16T17:59:27,633 INFO [master:store-WAL-Roller {}] wal.AbstractWALRoller(243): LogRoller exiting. 2024-12-16T17:59:27,633 INFO [M:0;3609ad07831c:38367 {}] flush.MasterFlushTableProcedureManager(91): stop: server shutting down. 2024-12-16T17:59:27,634 INFO [M:0;3609ad07831c:38367 {}] ipc.NettyRpcServer(351): Stopping server on /172.17.0.2:38367 2024-12-16T17:59:27,667 DEBUG [M:0;3609ad07831c:38367 {}] zookeeper.RecoverableZooKeeper(215): Node /hbase/rs/3609ad07831c,38367,1734371788356 already deleted, retry=false 2024-12-16T17:59:27,776 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:38367-0x1002fe073560000, quorum=127.0.0.1:49190, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-12-16T17:59:27,776 INFO [M:0;3609ad07831c:38367 {}] regionserver.HRegionServer(1307): Exiting; stopping=3609ad07831c,38367,1734371788356; zookeeper connection closed. 2024-12-16T17:59:27,776 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:38367-0x1002fe073560000, quorum=127.0.0.1:49190, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-12-16T17:59:27,787 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@5e63fd41{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-12-16T17:59:27,791 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@3f715f5d{HTTP/1.1, (http/1.1)}{localhost:0} 2024-12-16T17:59:27,791 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-12-16T17:59:27,791 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@6311a0d3{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-12-16T17:59:27,791 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@72940c9d{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/adfbedd3-683f-aafc-4f85-97b20bf2b38d/hadoop.log.dir/,STOPPED} 2024-12-16T17:59:27,795 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit. 
2024-12-16T17:59:27,795 WARN [BP-1899768724-172.17.0.2-1734371785309 heartbeating to localhost/127.0.0.1:40431 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted 2024-12-16T17:59:27,795 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup] 2024-12-16T17:59:27,795 WARN [BP-1899768724-172.17.0.2-1734371785309 heartbeating to localhost/127.0.0.1:40431 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-1899768724-172.17.0.2-1734371785309 (Datanode Uuid aed29c79-9217-44a3-9ef6-9f4b96769121) service to localhost/127.0.0.1:40431 2024-12-16T17:59:27,797 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/adfbedd3-683f-aafc-4f85-97b20bf2b38d/cluster_489038c2-cc48-2e45-cfe4-52e70c8038f4/dfs/data/data1/current/BP-1899768724-172.17.0.2-1734371785309 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-12-16T17:59:27,797 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/adfbedd3-683f-aafc-4f85-97b20bf2b38d/cluster_489038c2-cc48-2e45-cfe4-52e70c8038f4/dfs/data/data2/current/BP-1899768724-172.17.0.2-1734371785309 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-12-16T17:59:27,798 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 2024-12-16T17:59:27,805 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@7096be9b{hdfs,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/hdfs} 2024-12-16T17:59:27,805 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@16dcfbe1{HTTP/1.1, (http/1.1)}{localhost:0} 2024-12-16T17:59:27,805 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-12-16T17:59:27,806 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@4c273041{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-12-16T17:59:27,806 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@2ad156f7{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/adfbedd3-683f-aafc-4f85-97b20bf2b38d/hadoop.log.dir/,STOPPED} 2024-12-16T17:59:27,818 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(345): Shutdown MiniZK cluster with all ZK servers 2024-12-16T17:59:27,930 INFO [Time-limited test {}] hbase.HBaseTestingUtility(1347): Minicluster is down